author     Chris Wilson <chris@chris-wilson.co.uk>    2011-12-10 22:45:25 +0000
committer  Chris Wilson <chris@chris-wilson.co.uk>    2011-12-11 00:52:54 +0000
commit     051a18063df075536cb1ac0dc4dfc3c1306ab74e (patch)
tree       c485da7f3349fe814b863482a54d642ca9a4a92b
parent     735a15208dd600eefa3090f344186df9cac0462d (diff)
sna: Implement a VMA cache
A VMA cache appears unavoidable thanks to compiz and an excruciatingly slow
GTT pagefault, though it does look like it will be ineffectual during
everyday usage. Compiz (and presumably other compositing managers) appears
to be undoing all the pagefault minimisation, as demonstrated on gen5 with
large XPutImage. It also appears that the CPU to memory bandwidth ratio
plays a crucial role in determining whether going straight to the GTT or
through the CPU cache is a win - so there is no trivial heuristic.

x11perf -putimage10 -putimage500 on i5-2467m:
Before:
  bare:   1150,000   2,410
  compiz:  438,000   2,670
After:
  bare:   1190,000   2,730
  compiz:  437,000   2,690
UXA:
  bare:    658,000   2,670
  compiz:  389,000   2,520

On i3-330m:
Before:
  bare:    537,000   1,080
  compiz:  263,000     398
After:
  bare:    606,000   1,360
  compiz:  203,000     985
UXA:
  bare:    294,000   1,070
  compiz:  197,000     821

On pnv:
Before:
  bare:    179,000     213
  compiz:  106,000     123
After:
  bare:    181,000     246
  compiz:  103,000     197
UXA:
  bare:    114,000     312
  compiz:   75,700     191

Reported-by: Michael Larabel <Michael@phoronix.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
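For illustration only, a minimal self-contained sketch of the capped LRU
mapping cache this patch adds to kgem_bo_map(). The struct bo, vma_cache,
bo_map and cache_* names are stand-ins, and an anonymous mmap replaces the
GTT mapping so the example builds and runs without a DRM device; only the
MAX_VMA_CACHE cap and the evict-oldest-then-map flow mirror the driver code
in the diff below.

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    #define MAX_VMA_CACHE 4          /* 128 in the driver */
    #define BO_SIZE 4096

    struct bo {
            void *map;               /* cached mapping, NULL if unmapped */
            struct bo *prev, *next;  /* position in the LRU list */
    };

    struct vma_cache {
            struct bo *head, *tail;  /* head = least recently used */
            int vma_count;
    };

    static void cache_unlink(struct vma_cache *c, struct bo *bo)
    {
            if (bo->prev) bo->prev->next = bo->next; else c->head = bo->next;
            if (bo->next) bo->next->prev = bo->prev; else c->tail = bo->prev;
            bo->prev = bo->next = NULL;
    }

    static void cache_push_tail(struct vma_cache *c, struct bo *bo)
    {
            bo->prev = c->tail;
            bo->next = NULL;
            if (c->tail) c->tail->next = bo; else c->head = bo;
            c->tail = bo;
    }

    /* Analogue of kgem_bo_map(): reuse a cached mapping if present,
     * otherwise evict the oldest mappings and create a new one. */
    static void *bo_map(struct vma_cache *c, struct bo *bo)
    {
            if (bo->map == NULL) {
                    while (c->vma_count >= MAX_VMA_CACHE) {
                            struct bo *old = c->head;
                            munmap(old->map, BO_SIZE);
                            old->map = NULL;
                            cache_unlink(c, old);
                            c->vma_count--;
                    }

                    bo->map = mmap(NULL, BO_SIZE, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                    if (bo->map == MAP_FAILED) {
                            bo->map = NULL;
                            return NULL;
                    }
                    c->vma_count++;
            } else {
                    cache_unlink(c, bo);
            }

            /* Most recently used mappings live at the tail. */
            cache_push_tail(c, bo);
            return bo->map;
    }

    int main(void)
    {
            struct vma_cache c = { NULL, NULL, 0 };
            struct bo bos[8] = {{ NULL, NULL, NULL }};
            int i;

            for (i = 0; i < 8; i++)
                    bo_map(&c, &bos[i]);

            printf("live mappings: %d (cap %d)\n", c.vma_count, MAX_VMA_CACHE);
            return 0;
    }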
-rw-r--r--  src/sna/kgem.c              90
-rw-r--r--  src/sna/kgem.h               5
-rw-r--r--  src/sna/kgem_debug_gen3.c    4
-rw-r--r--  src/sna/kgem_debug_gen4.c    8
-rw-r--r--  src/sna/kgem_debug_gen5.c    8
-rw-r--r--  src/sna/kgem_debug_gen6.c   10
-rw-r--r--  src/sna/kgem_debug_gen7.c   10
-rw-r--r--  src/sna/sna_accel.c          9
-rw-r--r--  src/sna/sna_io.c             5
-rw-r--r--  src/sna/sna_video.c          1
10 files changed, 107 insertions, 43 deletions
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 68a1831b..3609a6f3 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -47,2 +47,8 @@ static inline void list_move(struct list *list, struct list *head)
+static inline void list_move_tail(struct list *list, struct list *head)
+{
+ __list_del(list->prev, list->next);
+ list_add_tail(list, head);
+}
+
static inline void list_replace(struct list *old,
@@ -77,2 +83,3 @@ static inline void list_replace(struct list *old,
#define PAGE_SIZE 4096
+#define MAX_VMA_CACHE 128
@@ -127,3 +134,2 @@ static void *gem_mmap(int fd, uint32_t handle, int size, int prot)
struct drm_i915_gem_mmap_gtt mmap_arg;
- struct drm_i915_gem_set_domain set_domain;
void *ptr;
@@ -146,8 +152,2 @@ static void *gem_mmap(int fd, uint32_t handle, int size, int prot)
- VG_CLEAR(set_domain);
- set_domain.handle = handle;
- set_domain.read_domains = I915_GEM_DOMAIN_GTT;
- set_domain.write_domain = prot & PROT_WRITE ? I915_GEM_DOMAIN_GTT : 0;
- drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
-
return ptr;
@@ -276,2 +276,3 @@ static struct kgem_bo *__kgem_bo_init(struct kgem_bo *bo,
list_init(&bo->list);
+ list_init(&bo->vma);
@@ -354,2 +355,3 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
list_init(&kgem->flushing);
+ list_init(&kgem->vma_cache);
for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
@@ -596,2 +598,8 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
+ if (bo->map) {
+ munmap(bo->map, bo->size);
+ list_del(&bo->vma);
+ kgem->vma_count--;
+ }
+
list_del(&bo->list);
@@ -622,2 +630,3 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
list_replace(&bo->request, &base->request);
+ list_replace(&bo->vma, &base->vma);
free(bo);
@@ -1816,7 +1825,51 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo, int prot)
- ptr = gem_mmap(kgem->fd, bo->handle, bo->size, prot);
- if (ptr == NULL)
- return NULL;
+ ptr = bo->map;
+ if (ptr == NULL) {
+ /* vma are limited on a per-process basis to around 64k.
+ * This includes all malloc arenas as well as other file
+ * mappings. In order to be fair and not hog the cache,
+ * and more importantly not to exhaust that limit and to
+ * start failing mappings, we keep our own number of open
+ * vma to within a conservative value.
+ */
+ while (kgem->vma_count > MAX_VMA_CACHE) {
+ struct kgem_bo *old;
+
+ old = list_first_entry(&kgem->vma_cache,
+ struct kgem_bo,
+ vma);
+ DBG(("%s: discarding vma cache for %d\n",
+ __FUNCTION__, old->handle));
+ munmap(old->map, old->size);
+ old->map = NULL;
+ list_del(&old->vma);
+ kgem->vma_count--;
+ }
+
+ ptr = gem_mmap(kgem->fd, bo->handle, bo->size,
+ PROT_READ | PROT_WRITE);
+ if (ptr == NULL)
+ return NULL;
+
+ /* Cache this mapping to avoid the overhead of an
+ * excruciatingly slow GTT pagefault. This is more an
+ * issue with compositing managers which need to frequently
+ * flush CPU damage to their GPU bo.
+ */
+ bo->map = ptr;
+ kgem->vma_count++;
+
+ DBG(("%s: caching vma for %d\n",
+ __FUNCTION__, bo->handle));
+ }
+
+ if (bo->needs_flush | bo->gpu) {
+ struct drm_i915_gem_set_domain set_domain;
+
+ VG_CLEAR(set_domain);
+ set_domain.handle = bo->handle;
+ set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+ set_domain.write_domain = prot & PROT_WRITE ? I915_GEM_DOMAIN_GTT : 0;
+ drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
- if (prot & PROT_WRITE) {
bo->needs_flush = false;
@@ -1826,2 +1879,4 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo, int prot)
+ list_move_tail(&bo->vma, &kgem->vma_cache);
+
return ptr;
@@ -1829,2 +1884,13 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo, int prot)
+void kgem_bo_unmap(struct kgem *kgem, struct kgem_bo *bo)
+{
+ assert(bo->map);
+
+ munmap(bo->map, bo->size);
+ bo->map = NULL;
+
+ list_del(&bo->vma);
+ kgem->vma_count--;
+}
+
uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo)
@@ -2153,2 +2219,4 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
list_init(&bo->base.request);
+ list_replace(&old->vma,
+ &bo->base.vma);
free(old);
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index e9e7cdcb..0d85f643 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -49,3 +49,5 @@ struct kgem_bo {
struct list request;
+ struct list vma;
+ void *map;
struct kgem_request *rq;
@@ -105,2 +107,3 @@ struct kgem {
struct list requests;
+ struct list vma_cache;
struct kgem_request *next_request;
@@ -112,2 +115,3 @@ struct kgem {
uint16_t nfence;
+ uint16_t vma_count;
@@ -316,2 +320,3 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo, int prot);
+void kgem_bo_unmap(struct kgem *kgem, struct kgem_bo *bo);
uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);
diff --git a/src/sna/kgem_debug_gen3.c b/src/sna/kgem_debug_gen3.c
index d152b608..0238b734 100644
--- a/src/sna/kgem_debug_gen3.c
+++ b/src/sna/kgem_debug_gen3.c
@@ -104,3 +104,3 @@ static void gen3_update_vertex_buffer_addr(struct kgem *kgem,
if (state.vb.current)
- munmap(state.vb.base, state.vb.current->size);
+ kgem_bo_unmap(kgem, state.vb.current);
@@ -1615,3 +1615,3 @@ void kgem_gen3_finish_state(struct kgem *kgem)
if (state.vb.current)
- munmap(state.vb.base, state.vb.current->size);
+ kgem_bo_unmap(kgem, state.vb.current);
diff --git a/src/sna/kgem_debug_gen4.c b/src/sna/kgem_debug_gen4.c
index d736cbd9..0f91d29a 100644
--- a/src/sna/kgem_debug_gen4.c
+++ b/src/sna/kgem_debug_gen4.c
@@ -92,3 +92,3 @@ static void gen4_update_vertex_buffer(struct kgem *kgem, const uint32_t *data)
if (state.vb[i].current)
- munmap(state.vb[i].base, state.vb[i].current->size);
+ kgem_bo_unmap(kgem, state.vb[i].current);
@@ -422,3 +422,3 @@ put_reloc(struct kgem *kgem, struct reloc *r)
if (r->bo != NULL)
- munmap(r->base, r->bo->size);
+ kgem_bo_unmap(kgem, r->bo);
}
@@ -699,3 +699,3 @@ static void finish_vertex_buffers(struct kgem *kgem)
if (state.vb[i].current)
- munmap(state.vb[i].base, state.vb[i].current->size);
+ kgem_bo_unmap(kgem, state.vb[i].current);
}
@@ -707,3 +707,3 @@ void kgem_gen4_finish_state(struct kgem *kgem)
if (state.dynamic_state.current)
- munmap(state.dynamic_state.base, state.dynamic_state.current->size);
+ kgem_bo_unmap(kgem, state.dynamic_state.current);
diff --git a/src/sna/kgem_debug_gen5.c b/src/sna/kgem_debug_gen5.c
index 78ba4432..c4f5df15 100644
--- a/src/sna/kgem_debug_gen5.c
+++ b/src/sna/kgem_debug_gen5.c
@@ -87,3 +87,3 @@ static void gen5_update_vertex_buffer(struct kgem *kgem, const uint32_t *data)
if (state.vb[i].current)
- munmap(state.vb[i].base, state.vb[i].current->size);
+ kgem_bo_unmap(kgem, state.vb[i].current);
@@ -396,3 +396,3 @@ put_reloc(struct kgem *kgem, struct reloc *r)
if (r->bo != NULL)
- munmap(r->base, r->bo->size);
+ kgem_bo_unmap(kgem, r->bo);
}
@@ -675,3 +675,3 @@ static void finish_vertex_buffers(struct kgem *kgem)
if (state.vb[i].current)
- munmap(state.vb[i].base, state.vb[i].current->size);
+ kgem_bo_unmap(kgem, state.vb[i].current);
}
@@ -683,3 +683,3 @@ void kgem_gen5_finish_state(struct kgem *kgem)
if (state.dynamic_state.current)
- munmap(state.dynamic_state.base, state.dynamic_state.current->size);
+ kgem_bo_unmap(kgem, state.dynamic_state.current);
diff --git a/src/sna/kgem_debug_gen6.c b/src/sna/kgem_debug_gen6.c
index d441b536..5bcd85dc 100644
--- a/src/sna/kgem_debug_gen6.c
+++ b/src/sna/kgem_debug_gen6.c
@@ -91,3 +91,3 @@ static void gen6_update_vertex_buffer(struct kgem *kgem, const uint32_t *data)
if (state.vb[i].current)
- munmap(state.vb[i].base, state.vb[i].current->size);
+ kgem_bo_unmap(kgem, state.vb[i].current);
@@ -132,3 +132,3 @@ static void gen6_update_dynamic_buffer(struct kgem *kgem, const uint32_t offset)
if (state.dynamic_state.current)
- munmap(state.dynamic_state.base, state.dynamic_state.current->size);
+ kgem_bo_unmap(kgem, state.dynamic_state.current);
@@ -308,3 +308,3 @@ static void finish_vertex_buffers(struct kgem *kgem)
if (state.vb[i].current)
- munmap(state.vb[i].base, state.vb[i].current->size);
+ kgem_bo_unmap(kgem, state.vb[i].current);
}
@@ -316,3 +316,3 @@ static void finish_state(struct kgem *kgem)
if (state.dynamic_state.current)
- munmap(state.dynamic_state.base, state.dynamic_state.current->size);
+ kgem_bo_unmap(kgem, state.dynamic_state.current);
@@ -484,3 +484,3 @@ put_reloc(struct kgem *kgem, struct reloc *r)
if (r->bo != NULL)
- munmap(r->base, r->bo->size);
+ kgem_bo_unmap(kgem, r->bo);
}
diff --git a/src/sna/kgem_debug_gen7.c b/src/sna/kgem_debug_gen7.c
index f6a49752..a33a918d 100644
--- a/src/sna/kgem_debug_gen7.c
+++ b/src/sna/kgem_debug_gen7.c
@@ -91,3 +91,3 @@ static void gen7_update_vertex_buffer(struct kgem *kgem, const uint32_t *data)
if (state.vb[i].current)
- munmap(state.vb[i].base, state.vb[i].current->size);
+ kgem_bo_unmap(kgem, state.vb[i].current);
@@ -132,3 +132,3 @@ static void gen7_update_dynamic_buffer(struct kgem *kgem, const uint32_t offset)
if (state.dynamic_state.current)
- munmap(state.dynamic_state.base, state.dynamic_state.current->size);
+ kgem_bo_unmap(kgem, state.dynamic_state.current);
@@ -308,3 +308,3 @@ static void finish_vertex_buffers(struct kgem *kgem)
if (state.vb[i].current)
- munmap(state.vb[i].base, state.vb[i].current->size);
+ kgem_bo_unmap(kgem, state.vb[i].current);
}
@@ -316,3 +316,3 @@ static void finish_state(struct kgem *kgem)
if (state.dynamic_state.current)
- munmap(state.dynamic_state.base, state.dynamic_state.current->size);
+ kgem_bo_unmap(kgem, state.dynamic_state.current);
@@ -484,3 +484,3 @@ put_reloc(struct kgem *kgem, struct reloc *r)
if (r->bo != NULL)
- munmap(r->base, r->bo->size);
+ kgem_bo_unmap(kgem, r->bo);
}
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index bb52770b..44580be1 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -189,5 +189,2 @@ static Bool sna_destroy_private(PixmapPtr pixmap, struct sna_pixmap *priv)
- if (priv->mapped)
- munmap(pixmap->devPrivate.ptr, priv->gpu_bo->size);
-
/* Always release the gpu bo back to the lower levels of caching */
@@ -1409,5 +1406,6 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
* for putimage10 on gen6 -- mostly due to slow page faulting in kernel.
+ * So we try again with vma caching, and only for pixmaps which will be
+ * immediately flushed...
*/
-#if 0
- if (priv->gpu_bo->rq == NULL &&
+ if (priv->flush &&
sna_put_image_upload_blt(drawable, gc, region,
@@ -1427,3 +1425,2 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
}
-#endif
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index aba636cc..767824fa 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -82,4 +82,2 @@ static void read_boxes_inplace(struct kgem *kgem,
} while (--n);
-
- munmap(src, bo->size);
}
@@ -285,4 +283,2 @@ static void write_boxes_inplace(struct kgem *kgem,
} while (--n);
-
- munmap(dst, bo->size);
}
@@ -466,3 +462,2 @@ struct kgem_bo *sna_replace(struct sna *sna,
pixmap->drawable.height);
- munmap(dst, bo->size);
}
diff --git a/src/sna/sna_video.c b/src/sna/sna_video.c
index bd5ff14a..d6d56f40 100644
--- a/src/sna/sna_video.c
+++ b/src/sna/sna_video.c
@@ -483,3 +483,2 @@ sna_video_copy_data(struct sna *sna,
- munmap(dst, frame->bo->size);
return TRUE;