Diffstat (limited to 'src/gallium/winsys/svga/drm/vmw_fence.c')
 src/gallium/winsys/svga/drm/vmw_fence.c | 191 ++++++++++++++++++++++--------
 1 file changed, 151 insertions(+), 40 deletions(-)
diff --git a/src/gallium/winsys/svga/drm/vmw_fence.c b/src/gallium/winsys/svga/drm/vmw_fence.c
index 754f8a666df..8af2250b7ea 100644
--- a/src/gallium/winsys/svga/drm/vmw_fence.c
+++ b/src/gallium/winsys/svga/drm/vmw_fence.c
@@ -22,20 +22,10 @@
* SOFTWARE.
*
**********************************************************/
-/*
- * TODO:
- *
- * Fencing is currently a bit inefficient, since we need to call the
- * kernel do determine a fence object signaled status if the fence is not
- * signaled. This can be greatly improved upon by using the fact that the
- * execbuf ioctl returns the last signaled fence seqno, as does the
- * fence signaled ioctl. We should set up a ring of fence objects and
- * walk through them checking for signaled status each time we receive a
- * new passed fence seqno.
- */
-
#include "util/u_memory.h"
#include "util/u_atomic.h"
+#include "util/u_double_list.h"
+#include "os/os_thread.h"
#include "pipebuffer/pb_buffer_fenced.h"
@@ -44,20 +34,129 @@
struct vmw_fence_ops
{
+ /*
+ * Immutable members.
+ */
struct pb_fence_ops base;
-
struct vmw_winsys_screen *vws;
+
+ pipe_mutex mutex;
+
+ /*
+    * Protected by mutex.
+ */
+ struct list_head not_signaled;
+ uint32_t last_signaled;
+ uint32_t last_emitted;
};
struct vmw_fence
{
+ struct list_head ops_list;
int32_t refcount;
uint32_t handle;
uint32_t mask;
int32_t signalled;
+ uint32_t seqno;
};
/**
+ * vmw_fence_seq_is_signaled - Check whether a fence seqno is
+ * signaled.
+ *
+ * @seq: Seqno of the fence to check.
+ * @last: Last seqno known to have signaled.
+ * @cur: Last seqno emitted.
+ *
+ */
+static INLINE boolean
+vmw_fence_seq_is_signaled(uint32_t seq, uint32_t last, uint32_t cur)
+{
+ return (cur - last <= cur - seq);
+}
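
A quick illustration of the wraparound arithmetic (editorial sketch, not part of the patch): the helper treats seqnos as points on a 32-bit ring, so a seqno counts as signaled when it lies at or before the last signaled seqno, measured backwards from the last emitted one. The standalone copy below uses made-up example values:

   #include <assert.h>
   #include <stdint.h>

   /* Standalone copy of the comparison used by vmw_fence_seq_is_signaled(). */
   static int seq_signaled(uint32_t seq, uint32_t last, uint32_t cur)
   {
      return (cur - last <= cur - seq);
   }

   int main(void)
   {
      assert(seq_signaled(5, 10, 20));    /* emitted at or before last_signaled */
      assert(!seq_signaled(15, 10, 20));  /* between last_signaled and last_emitted */
      assert(seq_signaled(0xfffffff0u, 0xfffffff8u, 0x10u)); /* still correct across wraparound */
      return 0;
   }

Unsigned subtraction keeps both distances well-defined when the 32-bit counter wraps, which is why no explicit wraparound handling is needed.
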
+
+
+/**
+ * vmw_fence_ops - Return the vmw_fence_ops structure backing a
+ * struct pb_fence_ops pointer.
+ *
+ * @ops: Pointer to a struct pb_fence_ops.
+ *
+ */
+static INLINE struct vmw_fence_ops *
+vmw_fence_ops(struct pb_fence_ops *ops)
+{
+ assert(ops);
+ return (struct vmw_fence_ops *)ops;
+}
+
+
+/**
+ * vmw_fences_release - Release all fences from the not_signaled
+ * list.
+ *
+ * @ops: Pointer to a struct vmw_fence_ops.
+ *
+ */
+static void
+vmw_fences_release(struct vmw_fence_ops *ops)
+{
+ struct vmw_fence *fence, *n;
+
+ pipe_mutex_lock(ops->mutex);
+ LIST_FOR_EACH_ENTRY_SAFE(fence, n, &ops->not_signaled, ops_list)
+ LIST_DELINIT(&fence->ops_list);
+ pipe_mutex_unlock(ops->mutex);
+}
+
+/**
+ * vmw_fences_signal - Traverse the not_signaled list and try to
+ * signal unsignaled fences.
+ *
+ * @fence_ops: Pointer to a struct pb_fence_ops.
+ * @signaled: Seqno that has signaled.
+ * @emitted: Last seqno emitted by the kernel.
+ * @has_emitted: Whether the caller provides a valid @emitted value.
+ *
+ */
+void
+vmw_fences_signal(struct pb_fence_ops *fence_ops,
+ uint32_t signaled,
+ uint32_t emitted,
+ boolean has_emitted)
+{
+ struct vmw_fence_ops *ops = NULL;
+ struct vmw_fence *fence, *n;
+
+ if (fence_ops == NULL)
+ return;
+
+ ops = vmw_fence_ops(fence_ops);
+ pipe_mutex_lock(ops->mutex);
+
+ if (!has_emitted) {
+ emitted = ops->last_emitted;
+ if (emitted - signaled > (1 << 30))
+ emitted = signaled;
+ }
+
+ if (signaled == ops->last_signaled && emitted == ops->last_emitted)
+ goto out_unlock;
+
+ LIST_FOR_EACH_ENTRY_SAFE(fence, n, &ops->not_signaled, ops_list) {
+ if (!vmw_fence_seq_is_signaled(fence->seqno, signaled, emitted))
+ break;
+
+ p_atomic_set(&fence->signalled, 1);
+ LIST_DELINIT(&fence->ops_list);
+ }
+ ops->last_signaled = signaled;
+ ops->last_emitted = emitted;
+
+out_unlock:
+ pipe_mutex_unlock(ops->mutex);
+}
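
For context, the intent of the patch is that the execbuf path feeds the kernel-reported seqnos into this function so that most fences can be retired without extra ioctls. A minimal caller sketch, assuming hypothetical variable names (passed_seqno, emitted_seqno) for the values reported by the kernel:

   /* Hypothetical call-site sketch; the seqno variable names are assumptions. */
   static void
   example_update_fences(struct vmw_winsys_screen *vws,
                         uint32_t passed_seqno, uint32_t emitted_seqno)
   {
      /* Retire every fence whose seqno is at or before passed_seqno. */
      vmw_fences_signal(vws->fence_ops, passed_seqno, emitted_seqno, TRUE);
   }
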
+
+
+/**
* vmw_fence - return the vmw_fence object identified by a
* struct pipe_fence_handle *
*
@@ -69,18 +168,22 @@ vmw_fence(struct pipe_fence_handle *fence)
return (struct vmw_fence *) fence;
}
+
/**
* vmw_fence_create - Create a user-space fence object.
*
+ * @fence_ops: The fence_ops manager to register with.
* @handle: Handle identifying the kernel fence object.
* @mask: Mask of flags that this fence object may signal.
*
* Returns NULL on failure.
*/
struct pipe_fence_handle *
-vmw_fence_create(uint32_t handle, uint32_t mask)
+vmw_fence_create(struct pb_fence_ops *fence_ops, uint32_t handle,
+ uint32_t seqno, uint32_t mask)
{
struct vmw_fence *fence = CALLOC_STRUCT(vmw_fence);
+ struct vmw_fence_ops *ops = vmw_fence_ops(fence_ops);
if (!fence)
return NULL;
@@ -88,27 +191,24 @@ vmw_fence_create(uint32_t handle, uint32_t mask)
p_atomic_set(&fence->refcount, 1);
fence->handle = handle;
fence->mask = mask;
+ fence->seqno = seqno;
p_atomic_set(&fence->signalled, 0);
+ pipe_mutex_lock(ops->mutex);
+
+ if (vmw_fence_seq_is_signaled(seqno, ops->last_signaled, seqno)) {
+ p_atomic_set(&fence->signalled, 1);
+ LIST_INITHEAD(&fence->ops_list);
+ } else {
+ p_atomic_set(&fence->signalled, 0);
+ LIST_ADDTAIL(&fence->ops_list, &ops->not_signaled);
+ }
- return (struct pipe_fence_handle *) fence;
-}
+ pipe_mutex_unlock(ops->mutex);
-/**
- * vmw_fence_ops - Return the vmw_fence_ops structure backing a
- * struct pb_fence_ops pointer.
- *
- * @ops: Pointer to a struct pb_fence_ops.
- *
- */
-static INLINE struct vmw_fence_ops *
-vmw_fence_ops(struct pb_fence_ops *ops)
-{
- assert(ops);
- return (struct vmw_fence_ops *)ops;
+ return (struct pipe_fence_handle *) fence;
}
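
Since the creation path now also registers the fence with the fence_ops manager, callers pass the manager and the fence seqno alongside the handle and mask. A minimal sketch of the new call shape, with hypothetical variable names:

   /* Hypothetical creation sketch; handle, seqno and mask are assumed to come
    * from the kernel's fence reply. */
   struct pipe_fence_handle *fh =
      vmw_fence_create(vws->fence_ops, handle, seqno, mask);
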
-
/**
* vmw_fence_reference - Reference / unreference a vmw fence object.
*
@@ -125,7 +225,14 @@ vmw_fence_reference(struct vmw_winsys_screen *vws,
struct vmw_fence *vfence = vmw_fence(*ptr);
if (p_atomic_dec_zero(&vfence->refcount)) {
+ struct vmw_fence_ops *ops = vmw_fence_ops(vws->fence_ops);
+
vmw_ioctl_fence_unref(vws, vfence->handle);
+
+ pipe_mutex_lock(ops->mutex);
+ LIST_DELINIT(&vfence->ops_list);
+ pipe_mutex_unlock(ops->mutex);
+
FREE(vfence);
}
}
@@ -171,18 +278,21 @@ vmw_fence_signalled(struct vmw_winsys_screen *vws,
if ((old & vflags) == vflags)
return 0;
+ /*
+ * Currently we update signaled fences on each execbuf call.
+ * That should really be sufficient, and we can avoid
+ * a lot of kernel calls this way.
+ */
+#if 1
ret = vmw_ioctl_fence_signalled(vws, vfence->handle, vflags);
- if (ret == 0) {
- int32_t prev = old;
-
- do {
- old = prev;
- prev = p_atomic_cmpxchg(&vfence->signalled, old, old | vflags);
- } while (prev != old);
- }
-
+ if (ret == 0)
+ p_atomic_set(&vfence->signalled, 1);
return ret;
+#else
+ (void) ret;
+ return -1;
+#endif
}
/**
@@ -287,6 +397,7 @@ vmw_fence_ops_fence_finish(struct pb_fence_ops *ops,
static void
vmw_fence_ops_destroy(struct pb_fence_ops *ops)
{
+ vmw_fences_release(vmw_fence_ops(ops));
FREE(ops);
}
@@ -310,6 +421,8 @@ vmw_fence_ops_create(struct vmw_winsys_screen *vws)
if(!ops)
return NULL;
+ pipe_mutex_init(ops->mutex);
+ LIST_INITHEAD(&ops->not_signaled);
ops->base.destroy = &vmw_fence_ops_destroy;
ops->base.fence_reference = &vmw_fence_ops_fence_reference;
ops->base.fence_signalled = &vmw_fence_ops_fence_signalled;
@@ -319,5 +432,3 @@ vmw_fence_ops_create(struct vmw_winsys_screen *vws)
return &ops->base;
}
-
-