author    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>  2009-01-13 11:41:19 +0100
committer Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>  2009-01-13 11:41:19 +0100
commit    78d9bb60d86044ea16eee86af1459b74c0fad032 (patch)
tree      5df84a485a26a15cfec47befab45bf7e842f167d
Initial import
-rw-r--r--  Makefile.am                7
-rwxr-xr-x  autogen.sh                12
-rw-r--r--  configure.ac              30
-rw-r--r--  src/Makefile.am           27
-rw-r--r--  src/wsbm_atomic.h         78
-rw-r--r--  src/wsbm_driver.c        239
-rw-r--r--  src/wsbm_driver.h        129
-rw-r--r--  src/wsbm_fencemgr.c      461
-rw-r--r--  src/wsbm_fencemgr.h      151
-rw-r--r--  src/wsbm_mallocpool.c    186
-rw-r--r--  src/wsbm_manager.c      1188
-rw-r--r--  src/wsbm_manager.h       183
-rw-r--r--  src/wsbm_mm.c            286
-rw-r--r--  src/wsbm_mm.h             73
-rw-r--r--  src/wsbm_pool.h          156
-rw-r--r--  src/wsbm_priv.h           43
-rw-r--r--  src/wsbm_slabpool.c     1202
-rw-r--r--  src/wsbm_slabpool_new.c 1213
-rw-r--r--  src/wsbm_ttmpool.c       511
-rw-r--r--  src/wsbm_userpool.c      691
-rw-r--r--  src/wsbm_util.h           76
21 files changed, 6942 insertions, 0 deletions
diff --git a/Makefile.am b/Makefile.am
new file mode 100644
index 0000000..2940c78
--- /dev/null
+++ b/Makefile.am
@@ -0,0 +1,7 @@
+SUBDIRS = src
+EXTRA_DIST = COPYING NEWS README libwsbm.pc.in
+
+pkgconfigdir = @pkgconfigdir@
+pkgconfig_DATA = libwsbm.pc
+
+
diff --git a/autogen.sh b/autogen.sh
new file mode 100755
index 0000000..904cd67
--- /dev/null
+++ b/autogen.sh
@@ -0,0 +1,12 @@
+#! /bin/sh
+
+srcdir=`dirname "$0"`
+test -z "$srcdir" && srcdir=.
+
+ORIGDIR=`pwd`
+cd "$srcdir"
+
+autoreconf -v --install || exit 1
+cd "$ORIGDIR" || exit $?
+
+"$srcdir"/configure --enable-maintainer-mode "$@"
diff --git a/configure.ac b/configure.ac
new file mode 100644
index 0000000..9f47d54
--- /dev/null
+++ b/configure.ac
@@ -0,0 +1,30 @@
+AC_PREREQ(2.57)
+AC_INIT([libwsbm], 1.0.0, [thomas@tungstengraphics.com], libwsbm)
+AC_CONFIG_SRCDIR([Makefile.am])
+AM_INIT_AUTOMAKE([dist-bzip2])
+
+AC_CONFIG_HEADERS([config.h])
+
+AC_DISABLE_STATIC
+AC_PROG_LIBTOOL
+AC_PROG_CC
+PKG_PROG_PKG_CONFIG
+
+PKG_CHECK_MODULES(libdrm, libdrm)
+AC_SUBST(libdrm_CFLAGS)
+AC_SUBST(libdrm_LIBS)
+AC_HEADER_STDC
+AC_SYS_LARGEFILE
+
+AC_CHECK_HEADER(pthread.h, [
+ AC_SEARCH_LIBS(pthread_cond_init, pthread,
+ [AC_DEFINE(HAVE_PTHREADS, 1, "os has pthreads")],,,)
+ ],,,)
+
+pkgconfigdir=${libdir}/pkgconfig
+AC_SUBST(pkgconfigdir)
+
+AC_OUTPUT([
+ Makefile
+ src/Makefile
+ libwsbm.pc])
diff --git a/src/Makefile.am b/src/Makefile.am
new file mode 100644
index 0000000..45d7ecc
--- /dev/null
+++ b/src/Makefile.am
@@ -0,0 +1,27 @@
+
+lib_LTLIBRARIES = libwsbm.la
+
+libwsbm_la_CFLAGS = @libdrm_CFLAGS@ -Wall
+libwsbm_la_LDFLAGS = -version-number 1:0:0 -no-undefined @libdrm_LIBS@
+libwsbm_la_SOURCES = \
+ wsbm_fencemgr.c \
+ wsbm_fencemgr.h \
+ wsbm_manager.c \
+ wsbm_manager.h \
+ wsbm_mm.c \
+ wsbm_mm.h \
+ wsbm_pool.h \
+ wsbm_util.h \
+ wsbm_mallocpool.c \
+ wsbm_driver.h \
+ wsbm_driver.c \
+ wsbm_ttmpool.c \
+ wsbm_slabpool.c \
+ wsbm_userpool.c \
+ wsbm_priv.h
+
+
+libwsbmincludedir = ${includedir}/wsbm
+libwsbminclude_HEADERS = wsbm_manager.h wsbm_pool.h wsbm_driver.h \
+ wsbm_fencemgr.h wsbm_util.h wsbm_atomic.h
+
diff --git a/src/wsbm_atomic.h b/src/wsbm_atomic.h
new file mode 100644
index 0000000..40980e6
--- /dev/null
+++ b/src/wsbm_atomic.h
@@ -0,0 +1,78 @@
+#ifndef _WSBM_ATOMIC_H_
+#define _WSBM_ATOMIC_H_
+
+#include <stdint.h>
+
+struct _WsbmAtomic {
+ int32_t count;
+};
+
+#define wsbmAtomicInit(_i) {(_i)}
+#define wsbmAtomicSet(_v, _i) (((_v)->count) = (_i))
+#define wsbmAtomicRead(_v) ((_v)->count)
+
+static inline int
+wsbmAtomicIncZero(struct _WsbmAtomic *v)
+{
+ unsigned char c;
+ __asm__ __volatile__(
+ "lock; incl %0; sete %1"
+ :"+m" (v->count), "=qm" (c)
+ : : "memory");
+ return c != 0;
+}
+
+static inline int
+wsbmAtomicDecNegative(struct _WsbmAtomic *v)
+{
+ unsigned char c;
+ int i = -1;
+ __asm__ __volatile__(
+ "lock; addl %2,%0; sets %1"
+ :"+m" (v->count), "=qm" (c)
+ :"ir" (i) : "memory");
+ return c;
+}
+
+static inline int
+wsbmAtomicDecZero(struct _WsbmAtomic *v)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ "lock; decl %0; sete %1"
+ :"+m" (v->count), "=qm" (c)
+ : : "memory");
+ return c != 0;
+}
+
+static inline void wsbmAtomicInc(struct _WsbmAtomic *v)
+{
+ __asm__ __volatile__(
+ "lock; incl %0"
+ :"+m" (v->count));
+}
+
+static inline void wsbmAtomicDec(struct _WsbmAtomic *v)
+{
+ __asm__ __volatile__(
+ "lock; decl %0"
+ :"+m" (v->count));
+}
+
+static inline int32_t wsbmAtomicCmpXchg(volatile struct _WsbmAtomic *v,
+					int32_t old, int32_t new_val)
+{
+    int32_t previous;
+
+    __asm__ __volatile__(
+	"lock; cmpxchgl %k1,%2"
+	: "=a" (previous)
+	: "r" (new_val), "m" (v->count), "0" (old)
+	: "memory");
+ return previous;
+}
+
+
+
+#endif
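
The atomics above exist mainly to support lock-free reference counting: the fence and buffer-object code later in this patch relies on wsbmAtomicInc() and wsbmAtomicDecZero() to catch the final 1-to-0 transition without holding a mutex. A minimal sketch of that pattern; struct _MyObject and myObjectDestroy() are hypothetical, not part of this import:

    struct _MyObject {
        struct _WsbmAtomic refCount;
        /* ... object payload ... */
    };

    /* Statically start with one reference: wsbmAtomicInit(1) expands to {(1)}. */
    static struct _MyObject obj = { .refCount = wsbmAtomicInit(1) };

    static void
    myObjectUnref(struct _MyObject *o)
    {
        /* Only the caller that drops the count to zero may destroy. */
        if (wsbmAtomicDecZero(&o->refCount))
            myObjectDestroy(o);     /* hypothetical destructor */
    }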
diff --git a/src/wsbm_driver.c b/src/wsbm_driver.c
new file mode 100644
index 0000000..1402715
--- /dev/null
+++ b/src/wsbm_driver.c
@@ -0,0 +1,239 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ **************************************************************************/
+/*
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdlib.h>
+#include <stddef.h>
+#include <errno.h>
+#include "wsbm_driver.h"
+
+struct _WsbmThreadFuncs *wsbmCurThreadFunc = NULL;
+struct _WsbmVNodeFuncs *wsbmCurVNodeFunc = NULL;
+
+/*
+ * Single-threaded implementation.
+ */
+
+static int
+n_mutexInit(struct _WsbmMutex *mutex, struct _WsbmThreadFuncs *func)
+{
+ mutex->func = func;
+ return 0;
+}
+
+static int
+n_condInit(struct _WsbmCond *cond, struct _WsbmThreadFuncs *func)
+{
+ cond->func = func;
+ return 0;
+}
+
+static void
+n_mutexNone(struct _WsbmMutex *mutex)
+{
+ ;
+}
+
+static void
+n_condNone(struct _WsbmCond *cond)
+{
+ ;
+}
+
+static void
+n_condWait(struct _WsbmCond *cond, struct _WsbmMutex *mutex)
+{
+ ;
+}
+
+static struct _WsbmThreadFuncs nullFunc = {
+ .mutexInit = n_mutexInit,
+ .mutexFree = n_mutexNone,
+ .mutexLock = n_mutexNone,
+ .mutexUnlock = n_mutexNone,
+ .condInit = n_condInit,
+ .condFree = n_condNone,
+ .condWait = n_condWait,
+ .condBroadcast = n_condNone
+};
+
+struct _WsbmThreadFuncs *
+wsbmNullThreadFuncs(void)
+{
+ return &nullFunc;
+}
+
+#if (HAVE_PTHREADS == 1)
+#include "pthread.h"
+
+/*
+ * pthreads implementation:
+ */
+
+
+struct _WsbmPMutex {
+ struct _WsbmThreadFuncs *func;
+ pthread_mutex_t mutex;
+};
+
+struct _WsbmPCond {
+ struct _WsbmThreadFuncs *func;
+ pthread_cond_t cond;
+};
+
+
+static inline struct _WsbmPMutex *
+pMutexConvert(struct _WsbmMutex *m)
+{
+ union _PMutexConverter {
+ struct _WsbmMutex wm;
+ struct _WsbmPMutex pm;
+ } *um = containerOf(m, union _PMutexConverter, wm);
+
+ return &um->pm;
+}
+
+static inline struct _WsbmPCond *
+pCondConvert(struct _WsbmCond *c)
+{
+ union _PCondConverter {
+ struct _WsbmCond wc;
+ struct _WsbmPCond pc;
+ } *uc = containerOf(c, union _PCondConverter, wc);
+
+ return &uc->pc;
+}
+
+
+static int
+p_mutexInit(struct _WsbmMutex *mutex, struct _WsbmThreadFuncs *func)
+{
+ struct _WsbmPMutex *pMutex = pMutexConvert(mutex);
+
+ if (sizeof(struct _WsbmMutex) < sizeof(struct _WsbmPMutex))
+ return -EINVAL;
+
+ pMutex->func = func;
+ pthread_mutex_init(&pMutex->mutex, NULL);
+ return 0;
+}
+
+static void
+p_mutexFree(struct _WsbmMutex *mutex)
+{
+ struct _WsbmPMutex *pMutex = pMutexConvert(mutex);
+ pthread_mutex_destroy(&pMutex->mutex);
+}
+
+static void
+p_mutexLock(struct _WsbmMutex *mutex)
+{
+ struct _WsbmPMutex *pMutex = pMutexConvert(mutex);
+
+ pthread_mutex_lock(&pMutex->mutex);
+}
+
+static void
+p_mutexUnlock(struct _WsbmMutex *mutex)
+{
+ struct _WsbmPMutex *pMutex = pMutexConvert(mutex);
+
+ pthread_mutex_unlock(&pMutex->mutex);
+}
+
+static int
+p_condInit(struct _WsbmCond *cond, struct _WsbmThreadFuncs *func)
+{
+ struct _WsbmPCond *pCond = pCondConvert(cond);
+
+ if (sizeof(struct _WsbmCond) < sizeof(struct _WsbmPCond))
+ return -EINVAL;
+
+ pCond->func = func;
+ pthread_cond_init(&pCond->cond, NULL);
+ return 0;
+}
+
+static void
+p_condFree(struct _WsbmCond *cond)
+{
+ struct _WsbmPCond *pCond = pCondConvert(cond);
+
+ pthread_cond_destroy(&pCond->cond);
+}
+
+static void
+p_condBroadcast(struct _WsbmCond *cond)
+{
+ struct _WsbmPCond *pCond = pCondConvert(cond);
+
+ pthread_cond_broadcast(&pCond->cond);
+}
+
+static void
+p_condWait(struct _WsbmCond *cond, struct _WsbmMutex *mutex)
+{
+ struct _WsbmPCond *pCond = pCondConvert(cond);
+ struct _WsbmPMutex *pMutex = pMutexConvert(mutex);
+
+ pthread_cond_wait(&pCond->cond, &pMutex->mutex);
+}
+
+static struct _WsbmThreadFuncs pthreadFunc = {
+ .mutexInit = p_mutexInit,
+ .mutexFree = p_mutexFree,
+ .mutexLock = p_mutexLock,
+ .mutexUnlock = p_mutexUnlock,
+ .condInit = p_condInit,
+ .condFree = p_condFree,
+ .condWait = p_condWait,
+ .condBroadcast = p_condBroadcast
+};
+
+struct _WsbmThreadFuncs *
+wsbmPThreadFuncs(void)
+{
+ return &pthreadFunc;
+}
+
+#else
+#warning Pthreads is not present. Compiling without.
+
+struct _WsbmThreadFuncs *
+wsbmPThreadFuncs(void)
+{
+    /* Fall back to the single-threaded stubs. */
+    return &nullFunc;
+}
+
+#endif
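
A driver chooses the threading back end once, at library init time; wsbmInit() in wsbm_manager.c below simply stores the pointers returned by the functions above. A minimal sketch of that selection, where myVNodeFuncs() stands in for a hypothetical driver-supplied validate-node implementation:

    struct _WsbmThreadFuncs *tf;

    #if (HAVE_PTHREADS == 1)
    tf = wsbmPThreadFuncs();      /* real mutexes and condition variables */
    #else
    tf = wsbmNullThreadFuncs();   /* single-threaded stubs */
    #endif

    if (wsbmInit(tf, myVNodeFuncs()) != 0)
        abort();                  /* out of memory */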
diff --git a/src/wsbm_driver.h b/src/wsbm_driver.h
new file mode 100644
index 0000000..7c40744
--- /dev/null
+++ b/src/wsbm_driver.h
@@ -0,0 +1,129 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ **************************************************************************/
+/*
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _WSBM_DRIVER_H
+#define _WSBM_DRIVER_H
+#include <stdint.h>
+#include "wsbm_util.h"
+
+#define WSBM_MUTEX_SPACE 16
+#define WSBM_COND_SPACE 16
+
+struct _WsbmMutex {
+ struct _WsbmThreadFuncs *func;
+ unsigned long storage[WSBM_MUTEX_SPACE];
+};
+
+struct _WsbmCond {
+ struct _WsbmThreadFuncs *func;
+ unsigned long storage[WSBM_COND_SPACE];
+};
+
+struct _WsbmThreadFuncs
+{
+ int (*mutexInit) (struct _WsbmMutex *,
+ struct _WsbmThreadFuncs *);
+ void (*mutexFree) (struct _WsbmMutex *);
+ void (*mutexLock) (struct _WsbmMutex *);
+ void (*mutexUnlock) (struct _WsbmMutex *);
+ int (*condInit) (struct _WsbmCond *,
+ struct _WsbmThreadFuncs *);
+ void (*condFree) (struct _WsbmCond *);
+ void (*condWait) (struct _WsbmCond *, struct _WsbmMutex *);
+ void (*condBroadcast) (struct _WsbmCond *);
+};
+
+extern struct _WsbmThreadFuncs *wsbmCurThreadFunc;
+
+#define WSBM_MUTEX_INIT(_mutex) \
+    wsbmThreadFuncs()->mutexInit(_mutex, wsbmThreadFuncs())
+#define WSBM_MUTEX_FREE(_mutex) \
+    do { \
+	(_mutex)->func->mutexFree(_mutex); \
+    } while (0)
+#define WSBM_MUTEX_LOCK(_mutex) \
+    (_mutex)->func->mutexLock(_mutex)
+#define WSBM_MUTEX_UNLOCK(_mutex) \
+    (_mutex)->func->mutexUnlock(_mutex)
+
+#define WSBM_COND_INIT(_cond) \
+    wsbmThreadFuncs()->condInit(_cond, wsbmThreadFuncs())
+#define WSBM_COND_FREE(_cond) \
+    do { \
+	(_cond)->func->condFree(_cond); \
+    } while (0)
+#define WSBM_COND_WAIT(_cond, _mutex) \
+    (_cond)->func->condWait(_cond, _mutex)
+#define WSBM_COND_BROADCAST(_cond) \
+    (_cond)->func->condBroadcast(_cond)
+
+struct _WsbmVNodeFuncs
+{
+ struct _ValidateNode *(*alloc) (struct _WsbmVNodeFuncs *, int);
+ void (*free) (struct _ValidateNode *);
+ void (*clear) (struct _ValidateNode *);
+};
+
+extern struct _WsbmVNodeFuncs *wsbmCurVNodeFunc;
+
+struct _WsbmBufStorage;
+struct _WsbmKernelBuf;
+
+struct _ValidateNode
+{
+ uint32_t hash;
+ int type_id;
+ struct _WsbmListHead head;
+ struct _WsbmListHead hashHead;
+ int listItem;
+ uint64_t set_flags;
+ uint64_t clr_flags;
+ void *buf;
+ struct _WsbmVNodeFuncs *func;
+};
+
+static inline struct _WsbmVNodeFuncs *
+wsbmVNodeFuncs(void)
+{
+ return wsbmCurVNodeFunc;
+}
+
+static inline struct _WsbmThreadFuncs *
+wsbmThreadFuncs(void)
+{
+ return wsbmCurThreadFunc;
+}
+
+extern struct _WsbmThreadFuncs *wsbmNullThreadFuncs(void);
+
+extern struct _WsbmThreadFuncs *wsbmPThreadFuncs(void);
+
+#endif
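
The _WsbmVNodeFuncs vtable lets a driver embed private per-buffer data in every validate-list node, using the same containerOf() trick (from wsbm_util.h) that wsbm_driver.c uses for the pthread mutex wrappers. A minimal sketch of a driver-side implementation; struct _MyNode and its flags payload are hypothetical:

    struct _MyNode {
        struct _ValidateNode base;
        uint64_t myDriverFlags;       /* hypothetical driver payload */
    };

    static struct _ValidateNode *
    my_alloc(struct _WsbmVNodeFuncs *func, int typeId)
    {
        struct _MyNode *n = calloc(1, sizeof(*n));

        if (!n)
            return NULL;
        n->base.func = func;
        n->base.type_id = typeId;
        return &n->base;
    }

    static void
    my_free(struct _ValidateNode *node)
    {
        free(containerOf(node, struct _MyNode, base));
    }

    static void
    my_clear(struct _ValidateNode *node)
    {
        containerOf(node, struct _MyNode, base)->myDriverFlags = 0;
    }

    static struct _WsbmVNodeFuncs myVNode = {
        .alloc = my_alloc,
        .free = my_free,
        .clear = my_clear,
    };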
diff --git a/src/wsbm_fencemgr.c b/src/wsbm_fencemgr.c
new file mode 100644
index 0000000..e969776
--- /dev/null
+++ b/src/wsbm_fencemgr.c
@@ -0,0 +1,461 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Tx., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "wsbm_fencemgr.h"
+#include "wsbm_pool.h"
+#include "wsbm_manager.h"
+#include <xf86drm.h>
+#include <ttm/ttm_fence_user.h>
+#include <string.h>
+#include <unistd.h>
+
+struct _WsbmFenceClass {
+ struct _WsbmListHead head;
+ struct _WsbmMutex mutex;
+ struct _WsbmMutex cmd_mutex;
+};
+
+/*
+ * Note: The per-class mutexes in struct _WsbmFenceClass should never be
+ * held during sleeps, since that may block fast concurrent access to
+ * fence data.
+ */
+
+struct _WsbmFenceMgr
+{
+ /*
+ * Constant members. Need no mutex protection.
+ */
+ struct _WsbmFenceMgrCreateInfo info;
+ void *private;
+
+ /*
+ * Atomic members. No mutex protection.
+ */
+
+ struct _WsbmAtomic count;
+
+ /*
+ * These members are protected by this->mutex
+ */
+
+ struct _WsbmFenceClass *classes;
+ uint32_t num_classes;
+};
+
+struct _WsbmFenceObject
+{
+
+ /*
+ * These members are constant and need no mutex protection.
+ * Note that @private may point to a structure with its own
+ * mutex protection, that we don't care about.
+ */
+
+ struct _WsbmFenceMgr *mgr;
+ uint32_t fence_class;
+ uint32_t fence_type;
+ void *private;
+
+    /*
+     * Atomic members. No mutex protection. Note that
+     * @signaled_types is updated using a compare-and-swap
+     * scheme to guarantee atomicity.
+     */
+
+ struct _WsbmAtomic refCount;
+ struct _WsbmAtomic signaled_types;
+
+    /*
+     * These members are protected by the owning fence class mutex.
+     */
+ struct _WsbmListHead head;
+};
+
+uint32_t
+wsbmFenceType(struct _WsbmFenceObject *fence)
+{
+ return fence->fence_type;
+}
+
+struct _WsbmFenceMgr *
+wsbmFenceMgrCreate(const struct _WsbmFenceMgrCreateInfo *info)
+{
+ struct _WsbmFenceMgr *tmp;
+    uint32_t i, j;
+ int ret;
+
+ tmp = calloc(1, sizeof(*tmp));
+ if (!tmp)
+ return NULL;
+
+ tmp->info = *info;
+ tmp->classes = calloc(tmp->info.num_classes, sizeof(*tmp->classes));
+ if (!tmp->classes)
+ goto out_err;
+
+ for (i = 0; i < tmp->info.num_classes; ++i) {
+ struct _WsbmFenceClass *fc = &tmp->classes[i];
+ WSBMINITLISTHEAD(&fc->head);
+ ret = WSBM_MUTEX_INIT(&fc->mutex);
+ if (ret)
+ goto out_err1;
+ ret = WSBM_MUTEX_INIT(&fc->cmd_mutex);
+ if (ret) {
+ WSBM_MUTEX_FREE(&fc->mutex);
+ goto out_err1;
+ }
+ }
+ wsbmAtomicSet(&tmp->count, 0);
+
+ return tmp;
+
+  out_err1:
+    for (j = 0; j < i; ++j) {
+	WSBM_MUTEX_FREE(&tmp->classes[j].mutex);
+	WSBM_MUTEX_FREE(&tmp->classes[j].cmd_mutex);
+    }
+    free(tmp->classes);
+  out_err:
+    free(tmp);
+    return NULL;
+}
+
+void
+wsbmFenceUnreference(struct _WsbmFenceObject **pFence)
+{
+ struct _WsbmFenceObject *fence = *pFence;
+ struct _WsbmFenceMgr *mgr;
+
+ *pFence = NULL;
+ if (fence == NULL)
+ return;
+
+ mgr = fence->mgr;
+ if (wsbmAtomicDecZero(&fence->refCount)) {
+ struct _WsbmFenceClass *fc = &mgr->classes[fence->fence_class];
+ WSBM_MUTEX_LOCK(&fc->mutex);
+ WSBMLISTDELINIT(&fence->head);
+ WSBM_MUTEX_UNLOCK(&fc->mutex);
+ if (fence->private)
+ mgr->info.unreference(mgr, &fence->private);
+ fence->mgr = NULL;
+ wsbmAtomicDecZero(&mgr->count);
+ free(fence);
+ }
+}
+
+static void
+wsbmSignalPreviousFences(struct _WsbmFenceMgr *mgr,
+ struct _WsbmListHead *list,
+ uint32_t fence_class, uint32_t signaled_types)
+{
+ struct _WsbmFenceClass *fc = &mgr->classes[fence_class];
+ struct _WsbmFenceObject *entry;
+ struct _WsbmListHead *prev;
+ uint32_t old_signaled_types;
+ uint32_t ret_st;
+
+ WSBM_MUTEX_LOCK(&fc->mutex);
+ while (list != &fc->head && list->next != list) {
+ entry = WSBMLISTENTRY(list, struct _WsbmFenceObject, head);
+ prev = list->prev;
+
+ do {
+ old_signaled_types = wsbmAtomicRead(&entry->signaled_types);
+ signaled_types = old_signaled_types | (signaled_types & entry->fence_type);
+ if (signaled_types == old_signaled_types)
+ break;
+
+ ret_st = wsbmAtomicCmpXchg(&entry->signaled_types, old_signaled_types,
+ signaled_types);
+ } while(ret_st != old_signaled_types);
+
+ if (signaled_types == entry->fence_type)
+ WSBMLISTDELINIT(list);
+
+ list = prev;
+ }
+ WSBM_MUTEX_UNLOCK(&fc->mutex);
+}
+
+int
+wsbmFenceFinish(struct _WsbmFenceObject *fence, uint32_t fence_type,
+ int lazy_hint)
+{
+ struct _WsbmFenceMgr *mgr = fence->mgr;
+ int ret = 0;
+
+
+ if ((wsbmAtomicRead(&fence->signaled_types) & fence_type) == fence_type)
+ goto out;
+
+ ret = mgr->info.finish(mgr, fence->private, fence_type, lazy_hint);
+ if (ret)
+ goto out;
+
+ wsbmSignalPreviousFences(mgr, &fence->head, fence->fence_class,
+ fence_type);
+ out:
+ return ret;
+}
+
+uint32_t
+wsbmFenceSignaledTypeCached(struct _WsbmFenceObject * fence)
+{
+ return wsbmAtomicRead(&fence->signaled_types);
+}
+
+int
+wsbmFenceSignaledType(struct _WsbmFenceObject *fence, uint32_t flush_type,
+ uint32_t * signaled)
+{
+ int ret = 0;
+ struct _WsbmFenceMgr *mgr;
+ uint32_t signaled_types;
+ uint32_t old_signaled_types;
+ uint32_t ret_st;
+
+ mgr = fence->mgr;
+ *signaled = wsbmAtomicRead(&fence->signaled_types);
+ if ((*signaled & flush_type) == flush_type)
+ goto out0;
+
+ ret = mgr->info.signaled(mgr, fence->private, flush_type, signaled);
+ if (ret) {
+ *signaled = wsbmAtomicRead(&fence->signaled_types);
+ goto out0;
+ }
+
+ do {
+ old_signaled_types = wsbmAtomicRead(&fence->signaled_types);
+ signaled_types = old_signaled_types | *signaled;
+ if (signaled_types == old_signaled_types)
+ break;
+
+ ret_st = wsbmAtomicCmpXchg(&fence->signaled_types, old_signaled_types,
+ signaled_types);
+ if (old_signaled_types == ret_st)
+ wsbmSignalPreviousFences(mgr, &fence->head, fence->fence_class,
+ *signaled);
+ } while(old_signaled_types != ret_st);
+
+ return 0;
+ out0:
+ return ret;
+}
+
+struct _WsbmFenceObject *
+wsbmFenceReference(struct _WsbmFenceObject *fence)
+{
+ if (fence == NULL)
+ return NULL;
+ wsbmAtomicInc(&fence->refCount);
+ return fence;
+}
+
+struct _WsbmFenceObject *
+wsbmFenceCreate(struct _WsbmFenceMgr *mgr, uint32_t fence_class,
+ uint32_t fence_type, void *private, size_t private_size)
+{
+ struct _WsbmFenceClass *fc = &mgr->classes[fence_class];
+ struct _WsbmFenceObject *fence;
+ size_t fence_size = sizeof(*fence);
+
+ if (private_size)
+ fence_size = ((fence_size + 15) & ~15);
+
+ fence = calloc(1, fence_size + private_size);
+
+ if (!fence)
+ goto out_err;
+
+ wsbmAtomicSet(&fence->refCount,1);
+ fence->mgr = mgr;
+ fence->fence_class = fence_class;
+ fence->fence_type = fence_type;
+ wsbmAtomicSet(&fence->signaled_types,0);
+ fence->private = private;
+ if (private_size) {
+ fence->private = (void *)(((uint8_t *) fence) + fence_size);
+ memcpy(fence->private, private, private_size);
+ }
+
+ WSBM_MUTEX_LOCK(&fc->mutex);
+ WSBMLISTADDTAIL(&fence->head, &fc->head);
+ WSBM_MUTEX_UNLOCK(&fc->mutex);
+ wsbmAtomicInc(&mgr->count);
+ return fence;
+
+  out_err:
+    {
+	int ret = mgr->info.finish(mgr, private, fence_type, 0);
+
+	if (ret)
+	    sleep(10);
+    }
+    mgr->info.unreference(mgr, &private);
+    return NULL;
+}
+
+struct _WsbmTTMFenceMgrPriv
+{
+ int fd;
+ unsigned int devOffset;
+};
+
+static int
+tSignaled(struct _WsbmFenceMgr *mgr, void *private, uint32_t flush_type,
+ uint32_t * signaled_type)
+{
+ struct _WsbmTTMFenceMgrPriv *priv =
+ (struct _WsbmTTMFenceMgrPriv *)mgr->private;
+ union ttm_fence_signaled_arg arg;
+ int ret;
+
+ arg.req.handle = (unsigned long)private;
+ arg.req.fence_type = flush_type;
+ arg.req.flush = 1;
+ *signaled_type = 0;
+
+ ret = drmCommandWriteRead(priv->fd, priv->devOffset + TTM_FENCE_SIGNALED,
+ &arg, sizeof(arg));
+ if (ret)
+ return ret;
+
+ *signaled_type = arg.rep.signaled_types;
+ return 0;
+}
+
+static int
+tFinish(struct _WsbmFenceMgr *mgr, void *private, uint32_t fence_type,
+ int lazy_hint)
+{
+ struct _WsbmTTMFenceMgrPriv *priv =
+ (struct _WsbmTTMFenceMgrPriv *)mgr->private;
+ union ttm_fence_finish_arg arg =
+ {.req =
+ {.handle = (unsigned long)private,
+ .fence_type = fence_type,
+ .mode = (lazy_hint) ? TTM_FENCE_FINISH_MODE_LAZY : 0
+ }
+ };
+ int ret;
+
+ do {
+ ret = drmCommandWrite(priv->fd, priv->devOffset + TTM_FENCE_FINISH,
+ &arg, sizeof(arg));
+ } while (ret == -EAGAIN || ret == -ERESTART);
+
+ return ret;
+}
+
+static int
+tUnref(struct _WsbmFenceMgr *mgr, void **private)
+{
+ struct _WsbmTTMFenceMgrPriv *priv =
+ (struct _WsbmTTMFenceMgrPriv *)mgr->private;
+ struct ttm_fence_unref_arg arg = {.handle = (unsigned long) *private };
+
+ *private = NULL;
+
+ return drmCommandWrite(priv->fd, priv->devOffset + TTM_FENCE_UNREF,
+ &arg, sizeof(arg));
+}
+
+struct _WsbmFenceMgr *
+wsbmFenceMgrTTMInit(int fd, unsigned int numClass, unsigned int devOffset)
+{
+ struct _WsbmFenceMgrCreateInfo info;
+ struct _WsbmFenceMgr *mgr;
+ struct _WsbmTTMFenceMgrPriv *priv = malloc(sizeof(*priv));
+
+ if (!priv)
+ return NULL;
+
+ priv->fd = fd;
+ priv->devOffset = devOffset;
+
+ info.flags = WSBM_FENCE_CLASS_ORDERED;
+ info.num_classes = numClass;
+ info.signaled = tSignaled;
+ info.finish = tFinish;
+ info.unreference = tUnref;
+
+ mgr = wsbmFenceMgrCreate(&info);
+ if (mgr == NULL) {
+ free(priv);
+ return NULL;
+ }
+
+ mgr->private = (void *)priv;
+ return mgr;
+}
+
+void wsbmFenceCmdLock(struct _WsbmFenceMgr *mgr,
+ uint32_t fence_class)
+{
+ WSBM_MUTEX_LOCK(&mgr->classes[fence_class].cmd_mutex);
+}
+
+void wsbmFenceCmdUnlock(struct _WsbmFenceMgr *mgr,
+ uint32_t fence_class)
+{
+ WSBM_MUTEX_UNLOCK(&mgr->classes[fence_class].cmd_mutex);
+}
+
+
+void
+wsbmFenceMgrTTMTakedown(struct _WsbmFenceMgr *mgr)
+{
+    uint32_t i;
+
+ if (!mgr)
+ return;
+
+ if (mgr->private)
+ free(mgr->private);
+
+    for (i = 0; i < mgr->info.num_classes; ++i) {
+ WSBM_MUTEX_FREE(&mgr->classes[i].mutex);
+ WSBM_MUTEX_FREE(&mgr->classes[i].cmd_mutex);
+ }
+ free(mgr);
+
+ return;
+}
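
The tSignaled()/tFinish()/tUnref() callbacks above map one-to-one onto the TTM fence ioctls, so a TTM-based driver only calls wsbmFenceMgrTTMInit() once per DRM file descriptor and tears the manager down with wsbmFenceMgrTTMTakedown(). A minimal sketch; MY_FENCE_DEV_OFFSET is hypothetical and device-specific:

    static struct _WsbmFenceMgr *
    myDriverFenceInit(int fd)
    {
        /* MY_FENCE_DEV_OFFSET: hypothetical offset of the TTM fence
         * ioctls in this device's DRM command range. */
        return wsbmFenceMgrTTMInit(fd, 1 /* one fence class */,
                                   MY_FENCE_DEV_OFFSET);
    }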
diff --git a/src/wsbm_fencemgr.h b/src/wsbm_fencemgr.h
new file mode 100644
index 0000000..92b0740
--- /dev/null
+++ b/src/wsbm_fencemgr.h
@@ -0,0 +1,151 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Tx., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef WSBM_FENCEMGR_H
+#define WSBM_FENCEMGR_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+struct _WsbmFenceObject;
+struct _WsbmFenceMgr;
+
+/*
+ * Do a quick check to see if the fence manager has registered the fence
+ * object as signaled. Note that this function may return a false negative
+ * answer.
+ */
+extern uint32_t wsbmFenceSignaledTypeCached(struct _WsbmFenceObject *fence);
+
+/*
+ * Check if the fence object is signaled. This function can be substantially
+ * more expensive to call than the above function, but will not return a false
+ * negative answer. The argument "flush_type" sets the types that the
+ * underlying mechanism must make sure will eventually signal.
+ */
+extern int wsbmFenceSignaledType(struct _WsbmFenceObject *fence,
+ uint32_t flush_type, uint32_t * signaled);
+
+/*
+ * Convenience functions.
+ */
+
+static inline int
+wsbmFenceSignaled(struct _WsbmFenceObject *fence, uint32_t flush_type)
+{
+ uint32_t signaled_types;
+ int ret = wsbmFenceSignaledType(fence, flush_type, &signaled_types);
+
+ if (ret)
+ return 0;
+ return ((signaled_types & flush_type) == flush_type);
+}
+
+static inline int
+wsbmFenceSignaledCached(struct _WsbmFenceObject *fence, uint32_t flush_type)
+{
+ uint32_t signaled_types = wsbmFenceSignaledTypeCached(fence);
+
+ return ((signaled_types & flush_type) == flush_type);
+}
+
+/*
+ * Reference a fence object.
+ */
+extern struct _WsbmFenceObject *wsbmFenceReference(struct _WsbmFenceObject
+ *fence);
+
+/*
+ * Unreference a fence object. The fence object pointer will be reset to NULL.
+ */
+
+extern void wsbmFenceUnreference(struct _WsbmFenceObject **pFence);
+
+/*
+ * Wait for a fence to signal the indicated fence_type.
+ * If "lazy_hint" is true, it indicates that the wait may sleep to avoid
+ * busy-wait polling.
+ */
+extern int wsbmFenceFinish(struct _WsbmFenceObject *fence, uint32_t fence_type,
+ int lazy_hint);
+
+/*
+ * Create a WsbmFenceObject for manager "mgr".
+ *
+ * "private" is a pointer that should be used for the callbacks in
+ * struct _WsbmFenceMgrCreateInfo.
+ *
+ * If private_size is nonzero, the data pointed to by "private", of size
+ * private_size bytes, will be copied, and the fence manager will instead
+ * use a pointer to the copy for the callbacks in
+ * struct _WsbmFenceMgrCreateInfo. In that case, the object pointed to by
+ * "private" may be destroyed after the call to wsbmFenceCreate.
+ */
+extern struct _WsbmFenceObject *wsbmFenceCreate(struct _WsbmFenceMgr *mgr,
+ uint32_t fence_class,
+ uint32_t fence_type,
+ void *private,
+ size_t private_size);
+
+extern uint32_t wsbmFenceType(struct _WsbmFenceObject *fence);
+
+/*
+ * Fence creations are ordered. If a fence signals a fence_type,
+ * it is safe to assume that all fences of the same class that were
+ * created before it have signaled the same type.
+ */
+
+#define WSBM_FENCE_CLASS_ORDERED (1 << 0)
+
+struct _WsbmFenceMgrCreateInfo
+{
+ uint32_t flags;
+ uint32_t num_classes;
+ int (*signaled) (struct _WsbmFenceMgr * mgr, void *private,
+ uint32_t flush_type, uint32_t * signaled_type);
+ int (*finish) (struct _WsbmFenceMgr * mgr, void *private,
+ uint32_t fence_type, int lazy_hint);
+ int (*unreference) (struct _WsbmFenceMgr * mgr, void **private);
+};
+
+extern struct _WsbmFenceMgr *wsbmFenceMgrCreate(const struct
+ _WsbmFenceMgrCreateInfo *info);
+extern void wsbmFenceCmdLock(struct _WsbmFenceMgr *mgr,
+ uint32_t fence_class);
+extern void wsbmFenceCmdUnlock(struct _WsbmFenceMgr *mgr,
+ uint32_t fence_class);
+/*
+ * Builtin drivers.
+ */
+
+extern struct _WsbmFenceMgr *wsbmFenceMgrTTMInit(int fd, unsigned int numClass,
+ unsigned int devOffset);
+extern void wsbmFenceMgrTTMTakedown(struct _WsbmFenceMgr *mgr);
+#endif
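
Putting the interface together, the expected lifecycle is: create a fence right after submitting a command batch, poll it cheaply with the cached query, and fall back to a blocking wait only when needed. A minimal sketch, assuming a single ordered fence class 0, a hypothetical type bit MY_FENCE_TYPE_EXE, and a kernel fence handle passed as the private pointer, which is what the TTM callbacks in wsbm_fencemgr.c expect:

    struct _WsbmFenceObject *fence =
        wsbmFenceCreate(mgr, 0, MY_FENCE_TYPE_EXE,
                        (void *)(unsigned long)handle, 0);

    if (fence) {
        if (!wsbmFenceSignaledCached(fence, MY_FENCE_TYPE_EXE))
            (void)wsbmFenceFinish(fence, MY_FENCE_TYPE_EXE, 1 /* lazy */);
        wsbmFenceUnreference(&fence);   /* fence is reset to NULL */
    }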
diff --git a/src/wsbm_mallocpool.c b/src/wsbm_mallocpool.c
new file mode 100644
index 0000000..7fc748d
--- /dev/null
+++ b/src/wsbm_mallocpool.c
@@ -0,0 +1,186 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdlib.h>
+#include <errno.h>
+#include "wsbm_pool.h"
+#include "wsbm_manager.h"
+
+struct _WsbmMallocBuffer
+{
+ struct _WsbmBufStorage buf;
+ size_t size;
+ void *mem;
+};
+
+static inline struct _WsbmMallocBuffer *
+mallocBuf(struct _WsbmBufStorage *buf)
+{
+ return containerOf(buf, struct _WsbmMallocBuffer, buf);
+}
+
+static struct _WsbmBufStorage *
+pool_create(struct _WsbmBufferPool *pool,
+ unsigned long size, uint32_t placement, unsigned alignment)
+{
+ struct _WsbmMallocBuffer *mBuf = malloc(size + sizeof(*mBuf) + 16);
+
+ if (!mBuf)
+ return NULL;
+
+ wsbmBufStorageInit(&mBuf->buf, pool);
+ mBuf->size = size;
+    /* Align the returned pointer; the extra 16 bytes in the malloc above
+     * leave room for this. */
+    mBuf->mem = (void *)(((unsigned long)mBuf + sizeof(*mBuf) + 15) & ~15UL);
+ if ((placement & WSBM_PL_MASK_MEM) != WSBM_PL_FLAG_SYSTEM)
+ abort();
+
+ return &mBuf->buf;
+}
+
+static void
+pool_destroy(struct _WsbmBufStorage **buf)
+{
+ free(mallocBuf(*buf));
+ *buf = NULL;
+}
+
+static int
+pool_waitIdle(struct _WsbmBufStorage *buf, int lazy)
+{
+ return 0;
+}
+
+static int
+pool_map(struct _WsbmBufStorage *buf, unsigned mode, void **virtual)
+{
+ *virtual = mallocBuf(buf)->mem;
+ return 0;
+}
+
+static void
+pool_unmap(struct _WsbmBufStorage *buf)
+{
+ ;
+}
+
+static int
+pool_syncforcpu (struct _WsbmBufStorage *buf, unsigned mode)
+{
+ return 0;
+}
+
+static void
+pool_releasefromcpu (struct _WsbmBufStorage *buf, unsigned mode)
+{
+ ;
+}
+
+static unsigned long
+pool_offset(struct _WsbmBufStorage *buf)
+{
+    /*
+     * Malloc buffers have no device address space offset;
+     * calling this is a bug.
+     */
+ abort();
+ return 0UL;
+}
+
+static unsigned long
+pool_poolOffset(struct _WsbmBufStorage *buf)
+{
+    /*
+     * Malloc buffers have no pool offset; calling this is a bug.
+     */
+    abort();
+    return 0UL;
+}
+
+static uint32_t
+pool_placement(struct _WsbmBufStorage *buf)
+{
+ return WSBM_PL_FLAG_SYSTEM | WSBM_PL_FLAG_CACHED;
+}
+
+static unsigned long
+pool_size(struct _WsbmBufStorage *buf)
+{
+ return mallocBuf(buf)->size;
+}
+
+static void
+pool_fence(struct _WsbmBufStorage *buf, struct _WsbmFenceObject *fence)
+{
+ abort();
+}
+
+static struct _WsbmKernelBuf *
+pool_kernel(struct _WsbmBufStorage *buf)
+{
+ abort();
+ return NULL;
+}
+
+static void
+pool_takedown(struct _WsbmBufferPool *pool)
+{
+ free(pool);
+}
+
+struct _WsbmBufferPool *
+wsbmMallocPoolInit(void)
+{
+ struct _WsbmBufferPool *pool;
+
+ pool = (struct _WsbmBufferPool *)calloc(1, sizeof(*pool));
+ if (!pool)
+ return NULL;
+
+ pool->fd = -1;
+ pool->map = &pool_map;
+ pool->unmap = &pool_unmap;
+ pool->syncforcpu = &pool_syncforcpu;
+ pool->releasefromcpu = &pool_releasefromcpu;
+ pool->destroy = &pool_destroy;
+ pool->offset = &pool_offset;
+ pool->poolOffset = &pool_poolOffset;
+ pool->placement = &pool_placement;
+ pool->size = &pool_size;
+ pool->create = &pool_create;
+ pool->fence = &pool_fence;
+ pool->kernel = &pool_kernel;
+ pool->validate = NULL;
+ pool->waitIdle = &pool_waitIdle;
+ pool->takeDown = &pool_takedown;
+ return pool;
+}
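
The malloc pool only honors WSBM_PL_FLAG_SYSTEM placement and aborts on any device-side operation (offset, fence, kernel), so it is meant for scratch buffers that never reach the GPU. A minimal usage sketch built on the manager API from wsbm_manager.c later in this patch:

    struct _WsbmBufferPool *pool = wsbmMallocPoolInit();
    struct _WsbmBufferObject *buf;
    void *virtual;

    if (pool &&
        wsbmGenBuffers(pool, 1, &buf, 0,
                       WSBM_PL_FLAG_SYSTEM | WSBM_PL_FLAG_CACHED) == 0) {
        if (wsbmBOData(buf, 4096, NULL, NULL, 0) == 0 &&
            (virtual = wsbmBOMap(buf, WSBM_ACCESS_WRITE)) != NULL) {
            /* ... fill the 4096-byte buffer ... */
            wsbmBOUnmap(buf);
        }
        wsbmDeleteBuffers(1, &buf);
    }
    if (pool)
        wsbmPoolTakeDown(pool);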
diff --git a/src/wsbm_manager.c b/src/wsbm_manager.c
new file mode 100644
index 0000000..79223ce
--- /dev/null
+++ b/src/wsbm_manager.c
@@ -0,0 +1,1188 @@
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdlib.h>
+#include "errno.h"
+#include "string.h"
+#include "wsbm_pool.h"
+#include "wsbm_manager.h"
+#include "wsbm_fencemgr.h"
+#include "wsbm_driver.h"
+#include "wsbm_priv.h"
+#include "wsbm_util.h"
+#include "wsbm_atomic.h"
+#include "assert.h"
+
+#define WSBM_LIST_HASHTAB_SIZE 256
+#define WSBM_LIST_HASHTAB_MASK 0xff
+#define WSBM_BODATA_SIZE_ACCEPT 4096
+
+#define WSBM_BUFFER_COMPLEX 0
+#define WSBM_BUFFER_SIMPLE 1
+#define WSBM_BUFFER_REF 2
+
+struct _ValidateList
+{
+ unsigned numTarget;
+ unsigned numCurrent;
+ unsigned numOnList;
+ int driverData;
+ struct _WsbmListHead list;
+ struct _WsbmListHead free;
+ struct _WsbmListHead hashTable[WSBM_LIST_HASHTAB_SIZE];
+};
+
+struct _WsbmBufferObject
+{
+ /* Left to the client to protect this data for now. */
+
+ struct _WsbmAtomic refCount;
+ struct _WsbmBufStorage *storage;
+
+ uint32_t placement;
+ unsigned alignment;
+ unsigned bufferType;
+ struct _WsbmBufferPool *pool;
+};
+
+
+struct _WsbmBufferList
+{
+ int hasKernelBuffers;
+
+ struct _ValidateList kernelBuffers; /* List of kernel buffers needing validation */
+ struct _ValidateList userBuffers; /* List of user-space buffers needing validation */
+};
+
+static struct _WsbmMutex bmMutex;
+static struct _WsbmCond bmCond;
+static int initialized = 0;
+static void *commonData = NULL;
+
+static int kernelReaders = 0;
+static int kernelLocked = 0;
+
+int
+wsbmInit(struct _WsbmThreadFuncs *tf, struct _WsbmVNodeFuncs *vf)
+{
+ int ret;
+
+ wsbmCurThreadFunc = tf;
+ wsbmCurVNodeFunc = vf;
+
+ ret = WSBM_MUTEX_INIT(&bmMutex);
+ if (ret)
+ return -ENOMEM;
+ ret = WSBM_COND_INIT(&bmCond);
+ if (ret) {
+ WSBM_MUTEX_FREE(&bmMutex);
+ return -ENOMEM;
+ }
+
+ initialized = 1;
+ return 0;
+}
+
+void wsbmCommonDataSet(void *d)
+{
+ commonData = d;
+}
+
+void *wsbmCommonDataGet(void)
+{
+ return commonData;
+}
+
+int wsbmIsInitialized(void)
+{
+ return initialized;
+}
+
+void
+wsbmTakedown(void)
+{
+ initialized = 0;
+ commonData = NULL;
+ WSBM_COND_FREE(&bmCond);
+ WSBM_MUTEX_FREE(&bmMutex);
+}
+
+static struct _ValidateNode *
+validateListAddNode(struct _ValidateList *list, void *item,
+ uint32_t hash, uint64_t flags, uint64_t mask)
+{
+ struct _ValidateNode *node;
+ struct _WsbmListHead *l;
+ struct _WsbmListHead *hashHead;
+
+ l = list->free.next;
+ if (l == &list->free) {
+ node = wsbmVNodeFuncs()->alloc(wsbmVNodeFuncs(), 0);
+ if (!node) {
+ return NULL;
+ }
+ list->numCurrent++;
+ } else {
+ WSBMLISTDEL(l);
+ node = WSBMLISTENTRY(l, struct _ValidateNode, head);
+ }
+ node->buf = item;
+ node->set_flags = flags & mask;
+ node->clr_flags = (~flags) & mask;
+ node->listItem = list->numOnList;
+ WSBMLISTADDTAIL(&node->head, &list->list);
+ list->numOnList++;
+ hashHead = list->hashTable + hash;
+ WSBMLISTADDTAIL(&node->hashHead, hashHead);
+
+ return node;
+}
+
+static uint32_t
+wsbmHashFunc(uint8_t * key, uint32_t len)
+{
+ uint32_t hash, i;
+
+ for (hash = 0, i = 0; i < len; ++i) {
+ hash += *key++;
+ hash += (hash << 10);
+ hash ^= (hash >> 6);
+ }
+
+ hash += (hash << 3);
+ hash ^= (hash >> 11);
+ hash += (hash << 15);
+
+ return hash & WSBM_LIST_HASHTAB_MASK;
+}
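+
+/*
+ * The hash above is Bob Jenkins' "one-at-a-time" hash, masked down to
+ * index the WSBM_LIST_HASHTAB_SIZE-entry hash tables.
+ */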
+
+static void
+validateFreeList(struct _ValidateList *list)
+{
+ struct _ValidateNode *node;
+ struct _WsbmListHead *l;
+
+ l = list->list.next;
+ while (l != &list->list) {
+ WSBMLISTDEL(l);
+ node = WSBMLISTENTRY(l, struct _ValidateNode, head);
+
+ WSBMLISTDEL(&node->hashHead);
+ node->func->free(node);
+ l = list->list.next;
+ list->numCurrent--;
+ list->numOnList--;
+ }
+
+ l = list->free.next;
+ while (l != &list->free) {
+ WSBMLISTDEL(l);
+ node = WSBMLISTENTRY(l, struct _ValidateNode, head);
+
+ node->func->free(node);
+ l = list->free.next;
+ list->numCurrent--;
+ }
+}
+
+static int
+validateListAdjustNodes(struct _ValidateList *list)
+{
+ struct _ValidateNode *node;
+ struct _WsbmListHead *l;
+ int ret = 0;
+
+ while (list->numCurrent < list->numTarget) {
+ node = wsbmVNodeFuncs()->alloc(wsbmVNodeFuncs(), list->driverData);
+ if (!node) {
+ ret = -ENOMEM;
+ break;
+ }
+ list->numCurrent++;
+ WSBMLISTADD(&node->head, &list->free);
+ }
+
+ while (list->numCurrent > list->numTarget) {
+ l = list->free.next;
+ if (l == &list->free)
+ break;
+ WSBMLISTDEL(l);
+ node = WSBMLISTENTRY(l, struct _ValidateNode, head);
+
+ node->func->free(node);
+ list->numCurrent--;
+ }
+ return ret;
+}
+
+static int
+validateCreateList(int numTarget, struct _ValidateList *list, int driverData)
+{
+ int i;
+
+ for (i = 0; i < WSBM_LIST_HASHTAB_SIZE; ++i)
+ WSBMINITLISTHEAD(&list->hashTable[i]);
+
+ WSBMINITLISTHEAD(&list->list);
+ WSBMINITLISTHEAD(&list->free);
+ list->numTarget = numTarget;
+ list->numCurrent = 0;
+ list->numOnList = 0;
+ list->driverData = driverData;
+ return validateListAdjustNodes(list);
+}
+
+static int
+validateResetList(struct _ValidateList *list)
+{
+ struct _WsbmListHead *l;
+ struct _ValidateNode *node;
+ int ret;
+
+ ret = validateListAdjustNodes(list);
+ if (ret)
+ return ret;
+
+ l = list->list.next;
+ while (l != &list->list) {
+ WSBMLISTDEL(l);
+ node = WSBMLISTENTRY(l, struct _ValidateNode, head);
+
+ WSBMLISTDEL(&node->hashHead);
+ WSBMLISTADD(l, &list->free);
+ list->numOnList--;
+ l = list->list.next;
+ }
+ return validateListAdjustNodes(list);
+}
+
+void
+wsbmWriteLockKernelBO(void)
+{
+ WSBM_MUTEX_LOCK(&bmMutex);
+ while (kernelReaders != 0)
+ WSBM_COND_WAIT(&bmCond, &bmMutex);
+ kernelLocked = 1;
+}
+
+void
+wsbmWriteUnlockKernelBO(void)
+{
+ kernelLocked = 0;
+ WSBM_MUTEX_UNLOCK(&bmMutex);
+}
+
+void
+wsbmReadLockKernelBO(void)
+{
+ WSBM_MUTEX_LOCK(&bmMutex);
+ if (kernelReaders++ == 0)
+ kernelLocked = 1;
+ WSBM_MUTEX_UNLOCK(&bmMutex);
+}
+
+void
+wsbmReadUnlockKernelBO(void)
+{
+ WSBM_MUTEX_LOCK(&bmMutex);
+ if (--kernelReaders == 0) {
+ kernelLocked = 0;
+ WSBM_COND_BROADCAST(&bmCond);
+ }
+ WSBM_MUTEX_UNLOCK(&bmMutex);
+}
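+
+/*
+ * The four functions above form a simple readers/writer lock over the
+ * kernel buffer-object state: wsbmWriteLockKernelBO() keeps bmMutex held
+ * across the whole write section and waits for active readers to drain,
+ * while readers take bmMutex only briefly to maintain the reader count.
+ * Typical read-side use:
+ *
+ *   wsbmReadLockKernelBO();
+ *   ... inspect kernel buffer data ...
+ *   wsbmReadUnlockKernelBO();
+ */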
+
+void
+wsbmBOWaitIdle(struct _WsbmBufferObject *buf, int lazy)
+{
+ struct _WsbmBufStorage *storage;
+
+ storage = buf->storage;
+ if (!storage)
+ return;
+
+ (void)storage->pool->waitIdle(storage, lazy);
+}
+
+void *
+wsbmBOMap(struct _WsbmBufferObject *buf, unsigned mode)
+{
+ struct _WsbmBufStorage *storage = buf->storage;
+ void *virtual;
+ int retval;
+
+ retval = storage->pool->map(storage, mode, &virtual);
+
+ return (retval == 0) ? virtual : NULL;
+}
+
+void
+wsbmBOUnmap(struct _WsbmBufferObject *buf)
+{
+ struct _WsbmBufStorage *storage = buf->storage;
+
+ storage->pool->unmap(storage);
+}
+
+int
+wsbmBOSyncForCpu(struct _WsbmBufferObject *buf, unsigned mode)
+{
+ struct _WsbmBufStorage *storage = buf->storage;
+
+ return storage->pool->syncforcpu(storage, mode);
+}
+
+void
+wsbmBOReleaseFromCpu(struct _WsbmBufferObject *buf, unsigned mode)
+{
+ struct _WsbmBufStorage *storage = buf->storage;
+
+ storage->pool->releasefromcpu(storage, mode);
+}
+
+unsigned long
+wsbmBOOffsetHint(struct _WsbmBufferObject *buf)
+{
+ struct _WsbmBufStorage *storage = buf->storage;
+
+ return storage->pool->offset(storage);
+}
+
+unsigned long
+wsbmBOPoolOffset(struct _WsbmBufferObject *buf)
+{
+ struct _WsbmBufStorage *storage = buf->storage;
+
+ return storage->pool->poolOffset(storage);
+}
+
+uint32_t
+wsbmBOPlacementHint(struct _WsbmBufferObject * buf)
+{
+ struct _WsbmBufStorage *storage = buf->storage;
+ assert(buf->storage != NULL);
+
+ return storage->pool->placement(storage);
+}
+
+struct _WsbmBufferObject *
+wsbmBOReference(struct _WsbmBufferObject *buf)
+{
+ if (buf->bufferType == WSBM_BUFFER_SIMPLE) {
+ wsbmAtomicInc(&buf->storage->refCount);
+ } else {
+ wsbmAtomicInc(&buf->refCount);
+ }
+ return buf;
+}
+
+int
+wsbmBOSetStatus(struct _WsbmBufferObject *buf,
+ uint32_t setFlags, uint32_t clrFlags)
+{
+ struct _WsbmBufStorage *storage = buf->storage;
+
+ if (!storage)
+ return 0;
+
+ if (storage->pool->setStatus == NULL)
+ return -EINVAL;
+
+ return storage->pool->setStatus(storage, setFlags, clrFlags);
+}
+
+void
+wsbmBOUnreference(struct _WsbmBufferObject **p_buf)
+{
+ struct _WsbmBufferObject *buf = *p_buf;
+ *p_buf = NULL;
+
+ if (!buf)
+ return;
+
+ if (buf->bufferType == WSBM_BUFFER_SIMPLE) {
+ struct _WsbmBufStorage *dummy = buf->storage;
+
+ wsbmBufStorageUnref(&dummy);
+ return;
+ }
+
+ if (wsbmAtomicDecZero(&buf->refCount)) {
+ wsbmBufStorageUnref(&buf->storage);
+ free(buf);
+ }
+}
+
+int
+wsbmBOData(struct _WsbmBufferObject *buf,
+ unsigned size, const void *data,
+ struct _WsbmBufferPool * newPool, uint32_t placement)
+{
+ void *virtual = NULL;
+ int newBuffer;
+ int retval = 0;
+ struct _WsbmBufStorage *storage;
+ int synced = 0;
+ uint32_t placement_diff;
+ struct _WsbmBufferPool *curPool;
+
+ if (buf->bufferType == WSBM_BUFFER_SIMPLE)
+ return -EINVAL;
+
+ storage = buf->storage;
+
+ if (newPool == NULL)
+ newPool = buf->pool;
+
+ if (newPool == NULL)
+ return -EINVAL;
+
+ newBuffer = (!storage || storage->pool != newPool ||
+ storage->pool->size(storage) < size ||
+ storage->pool->size(storage) > size + WSBM_BODATA_SIZE_ACCEPT);
+
+ if (!placement)
+ placement = buf->placement;
+
+ if (newBuffer) {
+ if (buf->bufferType == WSBM_BUFFER_REF)
+ return -EINVAL;
+
+ wsbmBufStorageUnref(&buf->storage);
+
+ if (size == 0) {
+ buf->pool = newPool;
+ buf->placement = placement;
+ retval = 0;
+ goto out;
+ }
+
+ buf->storage = newPool->create(newPool, size, placement, buf->alignment);
+ if (!buf->storage) {
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ buf->placement = placement;
+ buf->pool = newPool;
+ } else if (wsbmAtomicRead(&storage->onList) ||
+ 0 != storage->pool->syncforcpu(storage, WSBM_SYNCCPU_WRITE |
+ WSBM_SYNCCPU_DONT_BLOCK)) {
+	/*
+	 * The buffer is busy; we need to create a new one.
+	 */
+
+ struct _WsbmBufStorage *tmp_storage;
+ curPool = storage->pool;
+
+ tmp_storage = curPool->create(curPool, size, placement, buf->alignment);
+
+ if (tmp_storage) {
+ wsbmBufStorageUnref(&buf->storage);
+ buf->storage = tmp_storage;
+ buf->placement = placement;
+ } else {
+ retval = curPool->syncforcpu(storage, WSBM_SYNCCPU_WRITE);
+ if (retval)
+ goto out;
+ synced = 1;
+ }
+ } else
+ synced = 1;
+
+ placement_diff = placement ^ buf->placement;
+
+ /*
+ * We might need to change buffer placement.
+ */
+
+ storage = buf->storage;
+ curPool = storage->pool;
+
+ if (placement_diff) {
+ assert(curPool->setStatus != NULL);
+ curPool->releasefromcpu(storage, WSBM_SYNCCPU_WRITE);
+ retval = curPool->setStatus(storage,
+ placement_diff & placement,
+ placement_diff & ~placement);
+ if (retval)
+ goto out;
+
+ buf->placement = placement;
+
+ }
+
+ if (!synced) {
+ retval = curPool->syncforcpu(buf->storage, WSBM_SYNCCPU_WRITE);
+
+ if (retval)
+ goto out;
+ synced = 1;
+ }
+
+ storage = buf->storage;
+ curPool = storage->pool;
+
+ if (data) {
+ retval = curPool->map(storage, WSBM_ACCESS_WRITE, &virtual);
+ if (retval)
+ goto out;
+ memcpy(virtual, data, size);
+ curPool->unmap(storage);
+ }
+
+ out:
+
+ if (synced)
+ curPool->releasefromcpu(storage, WSBM_SYNCCPU_WRITE);
+
+ return retval;
+}
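+
+/*
+ * wsbmBOData() thus behaves much like glBufferData(): it replaces the
+ * buffer contents wholesale, and is free to swap in brand-new storage
+ * whenever the old storage is busy, badly sized, or in the wrong pool.
+ */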
+
+static struct _WsbmBufStorage *
+wsbmStorageClone(struct _WsbmBufferObject *buf)
+{
+ struct _WsbmBufStorage *storage = buf->storage;
+ struct _WsbmBufferPool *pool = storage->pool;
+
+ return pool->create(pool, pool->size(storage), buf->placement,
+ buf->alignment);
+}
+
+struct _WsbmBufferObject *
+wsbmBOClone(struct _WsbmBufferObject *buf,
+ int (*accelCopy) (struct _WsbmBufferObject *,
+ struct _WsbmBufferObject *))
+{
+ struct _WsbmBufferObject *newBuf;
+ int ret;
+
+ newBuf = malloc(sizeof(*newBuf));
+ if (!newBuf)
+ return NULL;
+
+ *newBuf = *buf;
+ newBuf->storage = wsbmStorageClone(buf);
+ if (!newBuf->storage)
+ goto out_err0;
+
+ wsbmAtomicSet(&newBuf->refCount, 1);
+ if (!accelCopy || accelCopy(newBuf, buf) != 0) {
+
+ struct _WsbmBufferPool *pool = buf->storage->pool;
+ struct _WsbmBufStorage *storage = buf->storage;
+ struct _WsbmBufStorage *newStorage = newBuf->storage;
+ void *virtual;
+ void *nVirtual;
+
+ ret = pool->syncforcpu(storage, WSBM_SYNCCPU_READ);
+ if (ret)
+ goto out_err1;
+ ret = pool->map(storage, WSBM_ACCESS_READ, &virtual);
+ if (ret)
+ goto out_err2;
+ ret = pool->map(newStorage, WSBM_ACCESS_WRITE, &nVirtual);
+ if (ret)
+ goto out_err3;
+
+ memcpy(nVirtual, virtual, pool->size(storage));
+ pool->unmap(newBuf->storage);
+ pool->unmap(buf->storage);
+ pool->releasefromcpu(storage, WSBM_SYNCCPU_READ);
+ }
+
+ return newBuf;
+ out_err3:
+ buf->pool->unmap(buf->storage);
+ out_err2:
+ buf->pool->releasefromcpu(buf->storage, WSBM_SYNCCPU_READ);
+ out_err1:
+ wsbmBufStorageUnref(&newBuf->storage);
+ out_err0:
+ free(newBuf);
+    return NULL;
+}
+
+
+int
+wsbmBOSubData(struct _WsbmBufferObject *buf,
+ unsigned long offset, unsigned long size, const void *data,
+ int (*accelCopy) (struct _WsbmBufferObject *,
+ struct _WsbmBufferObject *))
+{
+ int ret = 0;
+
+ if (buf->bufferType == WSBM_BUFFER_SIMPLE)
+ return -EINVAL;
+
+ if (size && data) {
+ void *virtual;
+ struct _WsbmBufStorage *storage = buf->storage;
+ struct _WsbmBufferPool *pool = storage->pool;
+
+ ret = pool->syncforcpu(storage, WSBM_SYNCCPU_WRITE);
+ if (ret)
+ goto out;
+
+ if (wsbmAtomicRead(&storage->onList)) {
+
+ struct _WsbmBufferObject *newBuf;
+
+ /*
+ * Another context has this buffer on its validate list.
+ * This should be a very rare situation, but it can be valid,
+ * and therefore we must deal with it by cloning the storage.
+ */
+
+ pool->releasefromcpu(storage, WSBM_SYNCCPU_WRITE);
+ newBuf = wsbmBOClone(buf, accelCopy);
+
+	    /*
+	     * If the clone fails, we can either bail (the other context
+	     * will be happy) or go on and update the old buffer anyway
+	     * (we will be happy). We choose the latter.
+	     */
+
+ if (newBuf) {
+ storage = newBuf->storage;
+ wsbmAtomicInc(&storage->refCount);
+ wsbmBufStorageUnref(&buf->storage);
+ buf->storage = storage;
+ wsbmBOUnreference(&newBuf);
+ pool = storage->pool;
+ }
+
+ ret = pool->syncforcpu(storage, WSBM_SYNCCPU_WRITE);
+ if (ret)
+ goto out;
+ }
+
+ ret = pool->map(storage, WSBM_ACCESS_WRITE, &virtual);
+ if (ret) {
+ pool->releasefromcpu(storage, WSBM_SYNCCPU_WRITE);
+ goto out;
+ }
+
+ memcpy((unsigned char *)virtual + offset, data, size);
+ pool->unmap(storage);
+ pool->releasefromcpu(storage, WSBM_SYNCCPU_WRITE);
+ }
+ out:
+ return ret;
+}
+
+int
+wsbmBOGetSubData(struct _WsbmBufferObject *buf,
+ unsigned long offset, unsigned long size, void *data)
+{
+ int ret = 0;
+
+ if (size && data) {
+ void *virtual;
+ struct _WsbmBufStorage *storage = buf->storage;
+ struct _WsbmBufferPool *pool = storage->pool;
+
+ ret = pool->syncforcpu(storage, WSBM_SYNCCPU_READ);
+ if (ret)
+ goto out;
+ ret = pool->map(storage, WSBM_ACCESS_READ, &virtual);
+ if (ret) {
+	    pool->releasefromcpu(storage, WSBM_SYNCCPU_READ);
+ goto out;
+ }
+ memcpy(data, (unsigned char *)virtual + offset, size);
+ pool->unmap(storage);
+	pool->releasefromcpu(storage, WSBM_SYNCCPU_READ);
+ }
+ out:
+ return ret;
+}
+
+int
+wsbmBOSetReferenced(struct _WsbmBufferObject *buf, unsigned long handle)
+{
+ int ret = 0;
+
+ wsbmBufStorageUnref(&buf->storage);
+ if (buf->pool->createByReference == NULL) {
+ ret = -EINVAL;
+ goto out;
+ }
+ buf->storage = buf->pool->createByReference(buf->pool, handle);
+ if (!buf->storage) {
+ ret = -EINVAL;
+ goto out;
+ }
+ buf->bufferType = WSBM_BUFFER_REF;
+ out:
+ return ret;
+}
+
+void wsbmBOFreeSimple(void *ptr)
+{
+ free(ptr);
+}
+
+struct _WsbmBufferObject *
+wsbmBOCreateSimple(struct _WsbmBufferPool *pool,
+ unsigned long size,
+ uint32_t placement,
+ unsigned alignment,
+ size_t extra_size,
+ size_t *offset)
+{
+ struct _WsbmBufferObject *buf;
+ struct _WsbmBufStorage *storage;
+
+ *offset = (sizeof(*buf) + 15) & ~15;
+
+ if (extra_size) {
+ extra_size += *offset - sizeof(*buf);
+ }
+
+ buf = (struct _WsbmBufferObject *)calloc(1, sizeof(*buf) + extra_size);
+ if (!buf)
+ return NULL;
+
+ storage = pool->create(pool, size, placement, alignment);
+ if (!storage)
+ goto out_err0;
+
+ storage->destroyContainer = &wsbmBOFreeSimple;
+ storage->destroyArg = buf;
+
+ buf->storage = storage;
+ buf->alignment = alignment;
+ buf->pool = pool;
+ buf->placement = placement;
+ buf->bufferType = WSBM_BUFFER_SIMPLE;
+
+ return buf;
+
+ out_err0:
+ free(buf);
+ return NULL;
+}
+
+
+
+int
+wsbmGenBuffers(struct _WsbmBufferPool *pool,
+ unsigned n,
+ struct _WsbmBufferObject *buffers[],
+ unsigned alignment, uint32_t placement)
+{
+ struct _WsbmBufferObject *buf;
+    unsigned i;
+
+ placement = (placement) ? placement :
+ WSBM_PL_FLAG_SYSTEM | WSBM_PL_FLAG_CACHED;
+
+ for (i = 0; i < n; ++i) {
+ buf = (struct _WsbmBufferObject *)calloc(1, sizeof(*buf));
+ if (!buf)
+ return -ENOMEM;
+
+ wsbmAtomicSet(&buf->refCount, 1);
+ buf->placement = placement;
+ buf->alignment = alignment;
+ buf->pool = pool;
+ buf->bufferType = WSBM_BUFFER_COMPLEX;
+ buffers[i] = buf;
+ }
+ return 0;
+}
+
+void
+wsbmDeleteBuffers(unsigned n, struct _WsbmBufferObject *buffers[])
+{
+    unsigned i;
+
+ for (i = 0; i < n; ++i) {
+ wsbmBOUnreference(&buffers[i]);
+ }
+}
+
+/*
+ * Note that lists are per-context and don't need mutex protection.
+ */
+
+struct _WsbmBufferList *
+wsbmBOCreateList(int target, int hasKernelBuffers)
+{
+    struct _WsbmBufferList *list = calloc(1, sizeof(*list));
+    int ret;
+
+    if (!list)
+	return NULL;
+
+    list->hasKernelBuffers = hasKernelBuffers;
+    if (hasKernelBuffers) {
+	ret = validateCreateList(target, &list->kernelBuffers, 0);
+	if (ret) {
+	    free(list);
+	    return NULL;
+	}
+    }
+
+    ret = validateCreateList(target, &list->userBuffers, 1);
+    if (ret) {
+	if (hasKernelBuffers)
+	    validateFreeList(&list->kernelBuffers);
+	free(list);
+	return NULL;
+    }
+
+ return list;
+}
+
+int
+wsbmBOResetList(struct _WsbmBufferList *list)
+{
+ int ret;
+
+ if (list->hasKernelBuffers) {
+ ret = validateResetList(&list->kernelBuffers);
+ if (ret)
+ return ret;
+ }
+ ret = validateResetList(&list->userBuffers);
+ return ret;
+}
+
+void
+wsbmBOFreeList(struct _WsbmBufferList *list)
+{
+ if (list->hasKernelBuffers)
+ validateFreeList(&list->kernelBuffers);
+ validateFreeList(&list->userBuffers);
+ free(list);
+}
+
+static int
+wsbmAddValidateItem(struct _ValidateList *list, void *buf, uint64_t flags,
+ uint64_t mask, int *itemLoc,
+ struct _ValidateNode **pnode, int *newItem)
+{
+ struct _ValidateNode *node, *cur;
+ struct _WsbmListHead *l;
+ struct _WsbmListHead *hashHead;
+ uint32_t hash;
+
+ cur = NULL;
+    hash = wsbmHashFunc((uint8_t *) &buf, sizeof(buf));
+ hashHead = list->hashTable + hash;
+ *newItem = 0;
+
+ for (l = hashHead->next; l != hashHead; l = l->next) {
+ node = WSBMLISTENTRY(l, struct _ValidateNode, hashHead);
+
+ if (node->buf == buf) {
+ cur = node;
+ break;
+ }
+ }
+ if (!cur) {
+ cur = validateListAddNode(list, buf, hash, flags, mask);
+ if (!cur)
+ return -ENOMEM;
+ *newItem = 1;
+ cur->func->clear(cur);
+ } else {
+ uint64_t set_flags = flags & mask;
+ uint64_t clr_flags = (~flags) & mask;
+
+ if (((cur->clr_flags | clr_flags) & WSBM_PL_MASK_MEM) ==
+ WSBM_PL_MASK_MEM) {
+ /*
+ * No available memory type left. Bail.
+ */
+ return -EINVAL;
+ }
+
+ if ((cur->set_flags | set_flags) &
+ (cur->clr_flags | clr_flags) & ~WSBM_PL_MASK_MEM) {
+ /*
+ * Conflicting flags. Bail.
+ */
+ return -EINVAL;
+ }
+
+ cur->set_flags &= ~(clr_flags & WSBM_PL_MASK_MEM);
+ cur->set_flags |= (set_flags & ~WSBM_PL_MASK_MEM);
+ cur->clr_flags |= clr_flags;
+ }
+ *itemLoc = cur->listItem;
+ if (pnode)
+ *pnode = cur;
+ return 0;
+}
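+
+/*
+ * Note on the flag merging above: the same buffer may be added to a list
+ * several times with different requirements. The acceptable memory types
+ * are intersected (each add may only narrow them; running out of types
+ * fails with -EINVAL), while the remaining set/clear flags accumulate,
+ * and a direct set/clear conflict outside WSBM_PL_MASK_MEM also fails
+ * with -EINVAL.
+ */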
+
+int
+wsbmBOAddListItem(struct _WsbmBufferList *list,
+ struct _WsbmBufferObject *buf,
+ uint64_t flags, uint64_t mask, int *itemLoc,
+ struct _ValidateNode **node)
+{
+ int newItem;
+ struct _WsbmBufStorage *storage = buf->storage;
+ int ret;
+ int dummy;
+ struct _ValidateNode *dummyNode;
+
+ if (list->hasKernelBuffers) {
+ ret = wsbmAddValidateItem(&list->kernelBuffers,
+ storage->pool->kernel(storage),
+ flags, mask, itemLoc, node, &dummy);
+ if (ret)
+ goto out_unlock;
+ } else {
+ *node = NULL;
+ *itemLoc = -1000;
+ }
+
+ ret = wsbmAddValidateItem(&list->userBuffers, storage,
+ flags, mask, &dummy, &dummyNode, &newItem);
+ if (ret)
+ goto out_unlock;
+
+ if (newItem) {
+ wsbmAtomicInc(&storage->refCount);
+ wsbmAtomicInc(&storage->onList);
+ }
+
+ out_unlock:
+ return ret;
+}
+
+void
+wsbmBOFence(struct _WsbmBufferObject *buf, struct _WsbmFenceObject *fence)
+{
+ struct _WsbmBufStorage *storage;
+
+ storage = buf->storage;
+ if (storage->pool->fence)
+ storage->pool->fence(storage, fence);
+}
+
+int
+wsbmBOOnList(const struct _WsbmBufferObject *buf)
+{
+ if (buf->storage == NULL)
+ return 0;
+ return wsbmAtomicRead(&buf->storage->onList);
+}
+
+
+int
+wsbmBOUnrefUserList(struct _WsbmBufferList *list)
+{
+ struct _WsbmBufStorage *storage;
+ void *curBuf;
+
+ curBuf = validateListIterator(&list->userBuffers);
+
+ while (curBuf) {
+ storage = (struct _WsbmBufStorage *)(validateListNode(curBuf)->buf);
+ wsbmAtomicDec(&storage->onList);
+ wsbmBufStorageUnref(&storage);
+ curBuf = validateListNext(&list->userBuffers, curBuf);
+ }
+
+ return wsbmBOResetList(list);
+}
+
+int
+wsbmBOFenceUserList(struct _WsbmBufferList *list,
+ struct _WsbmFenceObject *fence)
+{
+ struct _WsbmBufStorage *storage;
+ void *curBuf;
+
+ curBuf = validateListIterator(&list->userBuffers);
+
+ /*
+ * User-space fencing callbacks.
+ */
+
+ while (curBuf) {
+ storage = (struct _WsbmBufStorage *)(validateListNode(curBuf)->buf);
+
+ storage->pool->fence(storage, fence);
+ wsbmAtomicDec(&storage->onList);
+ wsbmBufStorageUnref(&storage);
+ curBuf = validateListNext(&list->userBuffers, curBuf);
+ }
+
+ return wsbmBOResetList(list);
+}
+
+int
+wsbmBOValidateUserList(struct _WsbmBufferList *list)
+{
+ void *curBuf;
+ struct _WsbmBufStorage *storage;
+ struct _ValidateNode *node;
+ int ret;
+
+ curBuf = validateListIterator(&list->userBuffers);
+
+ /*
+ * User-space validation callbacks.
+ */
+
+ while (curBuf) {
+ node = validateListNode(curBuf);
+ storage = (struct _WsbmBufStorage *)node->buf;
+ if (storage->pool->validate) {
+ ret = storage->pool->validate(storage, node->set_flags,
+ node->clr_flags);
+ if (ret)
+ return ret;
+ }
+ curBuf = validateListNext(&list->userBuffers, curBuf);
+ }
+ return 0;
+}
+
+
+int
+wsbmBOUnvalidateUserList(struct _WsbmBufferList *list)
+{
+ void *curBuf;
+ struct _WsbmBufStorage *storage;
+ struct _ValidateNode *node;
+
+ curBuf = validateListIterator(&list->userBuffers);
+
+ /*
+ * User-space validation callbacks.
+ */
+
+ while (curBuf) {
+ node = validateListNode(curBuf);
+ storage = (struct _WsbmBufStorage *)node->buf;
+ if (storage->pool->unvalidate) {
+ storage->pool->unvalidate(storage);
+ }
+ wsbmAtomicDec(&storage->onList);
+ wsbmBufStorageUnref(&storage);
+ curBuf = validateListNext(&list->userBuffers, curBuf);
+ }
+ return wsbmBOResetList(list);
+}
+
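+/*
+ * Typical submission flow for a user-space pool (sketch; the target count
+ * and flag values are illustrative):
+ *
+ *   struct _WsbmBufferList *list = wsbmBOCreateList(16, 0);
+ *   int loc;
+ *   struct _ValidateNode *node;
+ *
+ *   wsbmBOAddListItem(list, buf, WSBM_PL_FLAG_VRAM, WSBM_PL_MASK_MEM,
+ *                     &loc, &node);
+ *   wsbmBOValidateUserList(list);
+ *   ... build and submit the command buffer, create a fence ...
+ *   wsbmBOFenceUserList(list, fence);
+ *   wsbmBOFreeList(list);
+ */
+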
+void
+wsbmPoolTakeDown(struct _WsbmBufferPool *pool)
+{
+ pool->takeDown(pool);
+}
+
+unsigned long
+wsbmBOSize(struct _WsbmBufferObject *buf)
+{
+ unsigned long size;
+ struct _WsbmBufStorage *storage;
+
+ storage = buf->storage;
+ size = storage->pool->size(storage);
+
+ return size;
+}
+
+struct _ValidateList *
+wsbmGetKernelValidateList(struct _WsbmBufferList *list)
+{
+ return (list->hasKernelBuffers) ? &list->kernelBuffers : NULL;
+}
+
+struct _ValidateList *
+wsbmGetUserValidateList(struct _WsbmBufferList *list)
+{
+ return &list->userBuffers;
+}
+
+struct _ValidateNode *
+validateListNode(void *iterator)
+{
+ struct _WsbmListHead *l = (struct _WsbmListHead *)iterator;
+
+ return WSBMLISTENTRY(l, struct _ValidateNode, head);
+}
+
+void *
+validateListIterator(struct _ValidateList *list)
+{
+ void *ret = list->list.next;
+
+ if (ret == &list->list)
+ return NULL;
+ return ret;
+}
+
+void *
+validateListNext(struct _ValidateList *list, void *iterator)
+{
+ void *ret;
+
+ struct _WsbmListHead *l = (struct _WsbmListHead *)iterator;
+
+ ret = l->next;
+ if (ret == &list->list)
+ return NULL;
+ return ret;
+}
+
+uint32_t
+wsbmKBufHandle(const struct _WsbmKernelBuf * kBuf)
+{
+ return kBuf->handle;
+}
+
+void
+wsbmUpdateKBuf(struct _WsbmKernelBuf *kBuf,
+ uint64_t gpuOffset, uint32_t placement,
+ uint32_t fence_type_mask)
+{
+ kBuf->gpuOffset = gpuOffset;
+ kBuf->placement = placement;
+ kBuf->fence_type_mask = fence_type_mask;
+}
+
+struct _WsbmKernelBuf *
+wsbmKBuf(const struct _WsbmBufferObject *buf)
+{
+ struct _WsbmBufStorage *storage = buf->storage;
+
+ return storage->pool->kernel(storage);
+}
diff --git a/src/wsbm_manager.h b/src/wsbm_manager.h
new file mode 100644
index 0000000..e9efb62
--- /dev/null
+++ b/src/wsbm_manager.h
@@ -0,0 +1,183 @@
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _WSBM_MANAGER_H_
+#define _WSBM_MANAGER_H_
+#include "wsbm_fencemgr.h"
+#include "wsbm_util.h"
+#include "wsbm_driver.h"
+
+#define WSBM_VERSION_MAJOR 1
+#define WSBM_VERSION_MINOR 1
+#define WSBM_VERSION_PL 0
+
+struct _WsbmFenceObject;
+struct _WsbmBufferObject;
+struct _WsbmBufferPool;
+struct _WsbmBufferList;
+
+/*
+ * These flags mimic the TTM placement flags closely, but since this
+ * library is not dependent on TTM, we need to replicate them here.
+ * Any discrepancy must be resolved in the buffer pool that uses the
+ * TTM flags.
+ */
+
+#define WSBM_PL_MASK_MEM 0x0000FFFF
+
+#define WSBM_PL_FLAG_SYSTEM (1 << 0)
+#define WSBM_PL_FLAG_TT (1 << 1)
+#define WSBM_PL_FLAG_VRAM (1 << 2)
+#define WSBM_PL_FLAG_PRIV0 (1 << 3)
+#define WSBM_PL_FLAG_SWAPPED (1 << 15)
+#define WSBM_PL_FLAG_CACHED (1 << 16)
+#define WSBM_PL_FLAG_UNCACHED (1 << 17)
+#define WSBM_PL_FLAG_WC (1 << 18)
+#define WSBM_PL_FLAG_SHARED (1 << 20)
+#define WSBM_PL_FLAG_NO_EVICT (1 << 21)
+
+#define WSBM_ACCESS_READ (1 << 0)
+#define WSBM_ACCESS_WRITE (1 << 1)
+
+#define WSBM_SYNCCPU_READ WSBM_ACCESS_READ
+#define WSBM_SYNCCPU_WRITE WSBM_ACCESS_WRITE
+#define WSBM_SYNCCPU_DONT_BLOCK (1 << 2)
+#define WSBM_SYNCCPU_TRY_CACHED (1 << 3)
+
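+/*
+ * Example (illustrative): a write-combined VRAM placement would be
+ * requested as (WSBM_PL_FLAG_VRAM | WSBM_PL_FLAG_WC). A CPU write access
+ * to such a buffer would typically be bracketed as
+ *
+ *   wsbmBOSyncForCpu(buf, WSBM_SYNCCPU_WRITE);
+ *   void *ptr = wsbmBOMap(buf, WSBM_ACCESS_WRITE);
+ *   ...
+ *   wsbmBOUnmap(buf);
+ *   wsbmBOReleaseFromCpu(buf, WSBM_SYNCCPU_WRITE);
+ */
+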
+extern void *wsbmBOMap(struct _WsbmBufferObject *buf, unsigned mode);
+extern void wsbmBOUnmap(struct _WsbmBufferObject *buf);
+extern int wsbmBOSyncForCpu(struct _WsbmBufferObject *buf,
+ unsigned mode);
+extern void wsbmBOReleaseFromCpu(struct _WsbmBufferObject *buf,
+ unsigned mode);
+
+extern unsigned long wsbmBOOffsetHint(struct _WsbmBufferObject *buf);
+extern unsigned long wsbmBOPoolOffset(struct _WsbmBufferObject *buf);
+
+extern uint32_t wsbmBOPlacementHint(struct _WsbmBufferObject *buf);
+extern struct _WsbmBufferObject *wsbmBOReference(struct _WsbmBufferObject *buf);
+extern void wsbmBOUnreference(struct _WsbmBufferObject **p_buf);
+
+extern int wsbmBOData(struct _WsbmBufferObject *r_buf,
+ unsigned size, const void *data,
+ struct _WsbmBufferPool *pool, uint32_t placement);
+extern int wsbmBOSetStatus(struct _WsbmBufferObject *buf,
+ uint32_t setPlacement,
+ uint32_t clrPlacement);
+extern int wsbmBOSubData(struct _WsbmBufferObject *buf,
+ unsigned long offset, unsigned long size,
+ const void *data,
+ int (*accelCopy) (struct _WsbmBufferObject *,
+ struct _WsbmBufferObject *));
+extern struct _WsbmBufferObject *
+wsbmBOClone(struct _WsbmBufferObject *buf,
+ int (*accelCopy) (struct _WsbmBufferObject *,
+ struct _WsbmBufferObject *));
+
+extern int wsbmBOGetSubData(struct _WsbmBufferObject *buf,
+ unsigned long offset, unsigned long size,
+ void *data);
+extern int wsbmGenBuffers(struct _WsbmBufferPool *pool,
+ unsigned n,
+ struct _WsbmBufferObject *buffers[],
+ unsigned alignment, uint32_t placement);
+
+struct _WsbmBufferObject *
+wsbmBOCreateSimple(struct _WsbmBufferPool *pool,
+ unsigned long size,
+ uint32_t placement,
+ unsigned alignment,
+ size_t extra_size,
+ size_t *offset);
+
+extern void wsbmDeleteBuffers(unsigned n, struct _WsbmBufferObject *buffers[]);
+extern struct _WsbmBufferList *wsbmBOCreateList(int target,
+ int hasKernelBuffers);
+extern int wsbmBOResetList(struct _WsbmBufferList *list);
+extern int wsbmBOAddListItem(struct _WsbmBufferList *list,
+ struct _WsbmBufferObject *buf,
+ uint64_t flags, uint64_t mask, int *itemLoc,
+ struct _ValidateNode **node);
+
+extern void wsbmBOFreeList(struct _WsbmBufferList *list);
+extern int wsbmBOFenceUserList(struct _WsbmBufferList *list,
+ struct _WsbmFenceObject *fence);
+
+extern int wsbmBOUnrefUserList(struct _WsbmBufferList *list);
+extern int wsbmBOValidateUserList(struct _WsbmBufferList *list);
+extern int wsbmBOUnvalidateUserList(struct _WsbmBufferList *list);
+
+extern void wsbmBOFence(struct _WsbmBufferObject *buf,
+ struct _WsbmFenceObject *fence);
+
+extern void wsbmPoolTakeDown(struct _WsbmBufferPool *pool);
+extern int wsbmBOSetReferenced(struct _WsbmBufferObject *buf,
+ unsigned long handle);
+unsigned long wsbmBOSize(struct _WsbmBufferObject *buf);
+extern void wsbmBOWaitIdle(struct _WsbmBufferObject *buf, int lazy);
+extern int wsbmBOOnList(const struct _WsbmBufferObject *buf);
+
+extern void wsbmReadLockKernelBO(void);
+extern void wsbmReadUnlockKernelBO(void);
+extern void wsbmWriteLockKernelBO(void);
+extern void wsbmWriteUnlockKernelBO(void);
+
+extern int wsbmInit(struct _WsbmThreadFuncs *tf,
+ struct _WsbmVNodeFuncs *vf);
+extern void wsbmTakedown(void);
+extern int wsbmIsInitialized(void);
+extern void wsbmCommonDataSet(void *d);
+extern void *wsbmCommonDataGet(void);
+
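+/*
+ * Library setup sketch. The thread and validate-node function tables are
+ * supplied by the driver (see wsbm_driver.h); the names below are
+ * placeholders:
+ *
+ *   struct _WsbmThreadFuncs *tf = ...;
+ *   struct _WsbmVNodeFuncs *vf = ...;
+ *
+ *   if (wsbmInit(tf, vf) == 0) {
+ *       ...
+ *       wsbmTakedown();
+ *   }
+ */
+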
+extern struct _ValidateList *wsbmGetKernelValidateList(struct _WsbmBufferList
+ *list);
+extern struct _ValidateList *wsbmGetUserValidateList(struct _WsbmBufferList
+ *list);
+
+extern struct _ValidateNode *validateListNode(void *iterator);
+extern void *validateListIterator(struct _ValidateList *list);
+extern void *validateListNext(struct _ValidateList *list, void *iterator);
+
+extern uint32_t wsbmKBufHandle(const struct _WsbmKernelBuf *);
+extern void wsbmUpdateKBuf(struct _WsbmKernelBuf *,
+ uint64_t gpuOffset,
+ uint32_t placement,
+ uint32_t fence_flags);
+
+extern struct _WsbmKernelBuf *wsbmKBuf(const struct _WsbmBufferObject *buf);
+
+#endif
diff --git a/src/wsbm_mm.c b/src/wsbm_mm.c
new file mode 100644
index 0000000..e3c32fe
--- /dev/null
+++ b/src/wsbm_mm.c
@@ -0,0 +1,286 @@
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+
+/*
+ * Generic simple memory manager implementation. Intended to be used as a base
+ * class implementation for more advanced memory managers.
+ *
+ * Note that the algorithm used is quite simple, and there could be
+ * substantial performance gains from a smarter free list. Currently the
+ * free list is just an unordered stack of free regions; an RB-tree would
+ * be a straightforward improvement, at least under heavy fragmentation.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "wsbm_mm.h"
+#include <errno.h>
+#include <stdlib.h>
+
+unsigned long
+wsbmMMTailSpace(struct _WsbmMM *mm)
+{
+ struct _WsbmListHead *tail_node;
+ struct _WsbmMMNode *entry;
+
+ tail_node = mm->ml_entry.prev;
+ entry = WSBMLISTENTRY(tail_node, struct _WsbmMMNode, ml_entry);
+
+ if (!entry->free)
+ return 0;
+
+ return entry->size;
+}
+
+int
+wsbmMMRemoveSpaceFromTail(struct _WsbmMM *mm, unsigned long size)
+{
+ struct _WsbmListHead *tail_node;
+ struct _WsbmMMNode *entry;
+
+ tail_node = mm->ml_entry.prev;
+ entry = WSBMLISTENTRY(tail_node, struct _WsbmMMNode, ml_entry);
+
+ if (!entry->free)
+ return -ENOMEM;
+
+ if (entry->size <= size)
+ return -ENOMEM;
+
+ entry->size -= size;
+ return 0;
+}
+
+static int
+wsbmMMCreateTailNode(struct _WsbmMM *mm,
+ unsigned long start, unsigned long size)
+{
+ struct _WsbmMMNode *child;
+
+ child = (struct _WsbmMMNode *)malloc(sizeof(*child));
+ if (!child)
+ return -ENOMEM;
+
+ child->free = 1;
+ child->size = size;
+ child->start = start;
+ child->mm = mm;
+
+ WSBMLISTADDTAIL(&child->ml_entry, &mm->ml_entry);
+ WSBMLISTADDTAIL(&child->fl_entry, &mm->fl_entry);
+
+ return 0;
+}
+
+static struct _WsbmMMNode *
+wsbmMMSplitAtStart(struct _WsbmMMNode *parent, unsigned long size)
+{
+ struct _WsbmMMNode *child;
+
+ child = (struct _WsbmMMNode *)malloc(sizeof(*child));
+ if (!child)
+ return NULL;
+
+ WSBMINITLISTHEAD(&child->fl_entry);
+
+ child->free = 0;
+ child->size = size;
+ child->start = parent->start;
+ child->mm = parent->mm;
+
+ WSBMLISTADDTAIL(&child->ml_entry, &parent->ml_entry);
+
+ parent->size -= size;
+ parent->start += size;
+ return child;
+}
+
+struct _WsbmMMNode *
+wsbmMMGetBlock(struct _WsbmMMNode *parent,
+ unsigned long size, unsigned alignment)
+{
+
+ struct _WsbmMMNode *align_splitoff = NULL;
+ struct _WsbmMMNode *child;
+ unsigned tmp = 0;
+
+ if (alignment)
+ tmp = parent->start % alignment;
+
+ if (tmp) {
+ align_splitoff = wsbmMMSplitAtStart(parent, alignment - tmp);
+ if (!align_splitoff)
+ return NULL;
+ }
+
+    if (parent->size == size) {
+        WSBMLISTDELINIT(&parent->fl_entry);
+        parent->free = 0;
+        child = parent;
+    } else {
+        child = wsbmMMSplitAtStart(parent, size);
+    }
+
+ if (align_splitoff)
+ wsbmMMPutBlock(align_splitoff);
+
+ return child;
+}
+
+/*
+ * Put a block. Merge with the previous and / or next block if they are free.
+ * Otherwise add to the free stack.
+ */
+
+void
+wsbmMMPutBlock(struct _WsbmMMNode *cur)
+{
+
+ struct _WsbmMM *mm = cur->mm;
+ struct _WsbmListHead *cur_head = &cur->ml_entry;
+ struct _WsbmListHead *root_head = &mm->ml_entry;
+ struct _WsbmMMNode *prev_node = NULL;
+ struct _WsbmMMNode *next_node;
+
+ int merged = 0;
+
+ if (cur_head->prev != root_head) {
+ prev_node =
+ WSBMLISTENTRY(cur_head->prev, struct _WsbmMMNode, ml_entry);
+ if (prev_node->free) {
+ prev_node->size += cur->size;
+ merged = 1;
+ }
+ }
+ if (cur_head->next != root_head) {
+ next_node =
+ WSBMLISTENTRY(cur_head->next, struct _WsbmMMNode, ml_entry);
+ if (next_node->free) {
+ if (merged) {
+ prev_node->size += next_node->size;
+ WSBMLISTDEL(&next_node->ml_entry);
+ WSBMLISTDEL(&next_node->fl_entry);
+ free(next_node);
+ } else {
+ next_node->size += cur->size;
+ next_node->start = cur->start;
+ merged = 1;
+ }
+ }
+ }
+ if (!merged) {
+ cur->free = 1;
+ WSBMLISTADD(&cur->fl_entry, &mm->fl_entry);
+ } else {
+ WSBMLISTDEL(&cur->ml_entry);
+ free(cur);
+ }
+}
+
+struct _WsbmMMNode *
+wsbmMMSearchFree(const struct _WsbmMM *mm,
+ unsigned long size, unsigned alignment, int best_match)
+{
+ struct _WsbmListHead *list;
+ const struct _WsbmListHead *free_stack = &mm->fl_entry;
+ struct _WsbmMMNode *entry;
+ struct _WsbmMMNode *best;
+ unsigned long best_size;
+ unsigned wasted;
+
+ best = NULL;
+ best_size = ~0UL;
+
+ WSBMLISTFOREACH(list, free_stack) {
+ entry = WSBMLISTENTRY(list, struct _WsbmMMNode, fl_entry);
+
+ wasted = 0;
+
+ if (entry->size < size)
+ continue;
+
+ if (alignment) {
+ register unsigned tmp = entry->start % alignment;
+
+ if (tmp)
+ wasted += alignment - tmp;
+ }
+
+ if (entry->size >= size + wasted) {
+ if (!best_match)
+ return entry;
+            if (entry->size < best_size) {
+ best = entry;
+ best_size = entry->size;
+ }
+ }
+ }
+
+ return best;
+}
+
+int
+wsbmMMclean(struct _WsbmMM *mm)
+{
+ struct _WsbmListHead *head = &mm->ml_entry;
+
+ return (head->next->next == head);
+}
+
+int
+wsbmMMinit(struct _WsbmMM *mm, unsigned long start, unsigned long size)
+{
+ WSBMINITLISTHEAD(&mm->ml_entry);
+ WSBMINITLISTHEAD(&mm->fl_entry);
+
+ return wsbmMMCreateTailNode(mm, start, size);
+}
+
+void
+wsbmMMtakedown(struct _WsbmMM *mm)
+{
+ struct _WsbmListHead *bnode = mm->fl_entry.next;
+ struct _WsbmMMNode *entry;
+
+ entry = WSBMLISTENTRY(bnode, struct _WsbmMMNode, fl_entry);
+
+ if (entry->ml_entry.next != &mm->ml_entry ||
+ entry->fl_entry.next != &mm->fl_entry) {
+ return;
+ }
+
+ WSBMLISTDEL(&entry->fl_entry);
+ WSBMLISTDEL(&entry->ml_entry);
+ free(entry);
+}
diff --git a/src/wsbm_mm.h b/src/wsbm_mm.h
new file mode 100644
index 0000000..9d97a23
--- /dev/null
+++ b/src/wsbm_mm.h
@@ -0,0 +1,73 @@
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+
+/*
+ * Generic simple memory manager implementation. Intended to be used as a base
+ * class implementation for more advanced memory managers.
+ *
+ * Note that the algorithm used is quite simple, and there could be
+ * substantial performance gains from a smarter free list. Currently the
+ * free list is just an unordered stack of free regions; an RB-tree would
+ * be a straightforward improvement, at least under heavy fragmentation.
+ *
+ * Authors:
+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _WSBM_MM_H_
+#define _WSBM_MM_H_
+
+#include "wsbm_util.h"
+struct _WsbmMM
+{
+ struct _WsbmListHead fl_entry;
+ struct _WsbmListHead ml_entry;
+};
+
+struct _WsbmMMNode
+{
+ struct _WsbmListHead fl_entry;
+ struct _WsbmListHead ml_entry;
+ int free;
+ unsigned long start;
+ unsigned long size;
+ struct _WsbmMM *mm;
+};
+
+extern struct _WsbmMMNode *wsbmMMSearchFree(const struct _WsbmMM *mm,
+ unsigned long size,
+ unsigned alignment,
+ int best_match);
+extern struct _WsbmMMNode *wsbmMMGetBlock(struct _WsbmMMNode *parent,
+ unsigned long size,
+ unsigned alignment);
+extern void wsbmMMPutBlock(struct _WsbmMMNode *cur);
+extern void wsbmMMtakedown(struct _WsbmMM *mm);
+extern int wsbmMMinit(struct _WsbmMM *mm, unsigned long start, unsigned long size);
+extern int wsbmMMclean(struct _WsbmMM *mm);
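+
+/*
+ * Usage sketch (illustrative): manage a 1 MiB range and carve out one
+ * 4 KiB block.
+ *
+ *   struct _WsbmMM mm;
+ *   struct _WsbmMMNode *node;
+ *
+ *   wsbmMMinit(&mm, 0, 1024 * 1024);
+ *   node = wsbmMMSearchFree(&mm, 4096, 0, 0);
+ *   if (node)
+ *       node = wsbmMMGetBlock(node, 4096, 0);
+ *   ...
+ *   wsbmMMPutBlock(node);
+ *   wsbmMMtakedown(&mm);
+ */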
+#endif
diff --git a/src/wsbm_pool.h b/src/wsbm_pool.h
new file mode 100644
index 0000000..37aaaa5
--- /dev/null
+++ b/src/wsbm_pool.h
@@ -0,0 +1,156 @@
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _WSBM_BUFPOOL_H_
+#define _WSBM_BUFPOOL_H_
+
+#include <errno.h>
+#include "wsbm_util.h"
+#include "wsbm_driver.h"
+#include "wsbm_atomic.h"
+
+struct _WsbmFenceObject;
+
+struct _WsbmBufStorage
+{
+ struct _WsbmBufferPool *pool;
+ struct _WsbmMutex mutex;
+ struct _WsbmAtomic refCount;
+ struct _WsbmAtomic onList;
+ void * destroyArg;
+ void (*destroyContainer) (void *);
+};
+
+struct _WsbmKernelBuf;
+
+struct _WsbmBufferPool
+{
+ int fd;
+ int (*map) (struct _WsbmBufStorage * buf, unsigned mode, void **virtual);
+ void (*unmap) (struct _WsbmBufStorage * buf);
+ int (*syncforcpu) (struct _WsbmBufStorage *buf, unsigned mode);
+ void (*releasefromcpu) (struct _WsbmBufStorage *buf, unsigned mode);
+ void (*destroy) (struct _WsbmBufStorage ** buf);
+ unsigned long (*offset) (struct _WsbmBufStorage * buf);
+ unsigned long (*poolOffset) (struct _WsbmBufStorage * buf);
+ uint32_t(*placement) (struct _WsbmBufStorage * buf);
+ unsigned long (*size) (struct _WsbmBufStorage * buf);
+ struct _WsbmKernelBuf *(*kernel) (struct _WsbmBufStorage * buf);
+ struct _WsbmBufStorage *(*create) (struct _WsbmBufferPool * pool,
+ unsigned long size,
+ uint32_t placement, unsigned alignment);
+ struct _WsbmBufStorage *(*createByReference) (struct _WsbmBufferPool * pool,
+ uint32_t handle);
+ void (*fence) (struct _WsbmBufStorage * buf,
+ struct _WsbmFenceObject * fence);
+ void (*unvalidate) (struct _WsbmBufStorage * buf);
+ int (*validate) (struct _WsbmBufStorage * buf, uint64_t set_flags,
+ uint64_t clr_flags);
+ int (*waitIdle) (struct _WsbmBufStorage * buf, int lazy);
+ int (*setStatus) (struct _WsbmBufStorage * buf,
+ uint32_t set_placement,
+ uint32_t clr_placement);
+ void (*takeDown) (struct _WsbmBufferPool * pool);
+};
+
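+/*
+ * A pool implementation fills in the hooks above and hands back the
+ * embedded struct _WsbmBufferPool. Hooks a pool cannot support (for
+ * example fence or validate in the malloc pool) may be left NULL;
+ * callers such as wsbmBOFence() check for NULL before invoking them.
+ */
+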
+static inline int
+wsbmBufStorageInit(struct _WsbmBufStorage *storage, struct _WsbmBufferPool *pool)
+{
+ int ret = WSBM_MUTEX_INIT(&storage->mutex);
+ if (ret)
+ return -ENOMEM;
+ storage->pool = pool;
+ wsbmAtomicSet(&storage->refCount, 1);
+ wsbmAtomicSet(&storage->onList, 0);
+ storage->destroyContainer = NULL;
+ return 0;
+}
+
+static inline void
+wsbmBufStorageTakedown(struct _WsbmBufStorage *storage)
+{
+ WSBM_MUTEX_FREE(&storage->mutex);
+}
+
+static inline void
+wsbmBufStorageUnref(struct _WsbmBufStorage **pStorage)
+{
+ struct _WsbmBufStorage *storage = *pStorage;
+
+ *pStorage = NULL;
+ if (storage == NULL)
+ return;
+
+ if (wsbmAtomicDecZero(&storage->refCount)) {
+ if (storage->destroyContainer)
+ storage->destroyContainer(storage->destroyArg);
+ storage->pool->destroy(&storage);
+ return;
+ }
+}
+
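+/*
+ * Note: wsbmBufStorageUnref() zeroes the caller's pointer, tolerates a
+ * NULL storage, and calls the pool's destroy hook when the last reference
+ * is dropped, after any container destructor registered via
+ * destroyContainer.
+ */
+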
+/*
+ * Builtin pools.
+ */
+
+/*
+ * Kernel buffer objects. Size in multiples of page size. Page size aligned.
+ */
+
+extern struct _WsbmBufferPool *wsbmTTMPoolInit(int fd, unsigned int devOffset);
+extern struct _WsbmBufferPool *wsbmMallocPoolInit(void);
+
+struct _WsbmSlabCache;
+extern struct _WsbmBufferPool * wsbmSlabPoolInit(int fd, uint32_t devOffset,
+ uint32_t placement,
+ uint32_t validMask,
+ uint32_t smallestSize,
+ uint32_t numSizes,
+ uint32_t desiredNumBuffers,
+ uint32_t maxSlabSize,
+ uint32_t pageAlignment,
+ struct _WsbmSlabCache *cache);
+extern struct _WsbmSlabCache *
+wsbmSlabCacheInit(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec);
+extern void wsbmSlabCacheFinish(struct _WsbmSlabCache *cache);
+
+extern struct _WsbmBufferPool *
+wsbmUserPoolInit(void *vramAddr,
+ unsigned long vramStart, unsigned long vramSize,
+ void *agpAddr, unsigned long agpStart,
+ unsigned long agpSize,
+ uint32_t (*fenceTypes) (uint64_t set_flags));
+
+extern void wsbmUserPoolClean(struct _WsbmBufferPool *pool,
+ int cleanVram,
+ int cleanAgp);
+
+#endif
diff --git a/src/wsbm_priv.h b/src/wsbm_priv.h
new file mode 100644
index 0000000..d39f32a
--- /dev/null
+++ b/src/wsbm_priv.h
@@ -0,0 +1,43 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ **************************************************************************/
+/*
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _WSBM_PRIV_H_
+#define _WSBM_PRIV_H_
+
+struct _WsbmKernelBuf
+{
+ uint64_t gpuOffset;
+ uint32_t handle;
+ uint32_t placement;
+ uint32_t fence_type_mask;
+};
+
+#endif
diff --git a/src/wsbm_slabpool.c b/src/wsbm_slabpool.c
new file mode 100644
index 0000000..1c23449
--- /dev/null
+++ b/src/wsbm_slabpool.c
@@ -0,0 +1,1202 @@
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <ttm/ttm_placement_user.h>
+#include <stdint.h>
+#include <sys/time.h>
+#include <errno.h>
+#include <unistd.h>
+#include <assert.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <xf86drm.h>
+#include "wsbm_pool.h"
+#include "wsbm_fencemgr.h"
+#include "wsbm_priv.h"
+#include "wsbm_manager.h"
+
+#define WSBM_SLABPOOL_ALLOC_RETRIES 100
+#define DRMRESTARTCOMMANDWRITE(_fd, _val, _arg, _ret) \
+ do { \
+ (_ret) = drmCommandWrite(_fd, _val, &(_arg), sizeof(_arg)); \
+    } while ((_ret) == -EAGAIN || (_ret) == -ERESTART);
+
+#define DRMRESTARTCOMMANDWRITEREAD(_fd, _val, _arg, _ret) \
+ do { \
+ (_ret) = drmCommandWriteRead(_fd, _val, &(_arg), sizeof(_arg)); \
+    } while ((_ret) == -EAGAIN || (_ret) == -ERESTART);
+
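+/*
+ * The TTM ioctls used below may be interrupted by a signal, in which case
+ * they return -EAGAIN or -ERESTART; the wrappers above simply resubmit
+ * the command until it goes through.
+ */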
+
+#ifdef DEBUG_FENCESIGNALED
+static int createbuffer = 0;
+static int fencesignaled = 0;
+#endif
+
+struct _WsbmSlab;
+
+struct _WsbmSlabBuffer {
+ struct _WsbmKernelBuf kBuf;
+ struct _WsbmBufStorage storage;
+ struct _WsbmCond event;
+
+ /*
+ * Remains constant after creation.
+ */
+
+ int isSlabBuffer;
+ struct _WsbmSlab *parent;
+ uint32_t start;
+ void *virtual;
+ unsigned long requestedSize;
+ uint64_t mapHandle;
+
+ /*
+ * Protected by struct _WsbmSlabSizeHeader::mutex
+ */
+
+ struct _WsbmListHead head;
+
+ /*
+ * Protected by this::mutex
+ */
+
+ struct _WsbmFenceObject *fence;
+ uint32_t fenceType;
+ struct _WsbmAtomic writers; /* (Only upping) */
+ int unFenced;
+};
+
+struct _WsbmSlabPool;
+struct _WsbmSlabKernelBO {
+
+ /*
+ * Constant at creation
+ */
+
+ struct _WsbmKernelBuf kBuf;
+ uint32_t pageAlignment;
+ void *virtual;
+ unsigned long actualSize;
+ uint64_t mapHandle;
+
+ /*
+ * Protected by struct _WsbmSlabCache::mutex
+ */
+
+ struct _WsbmSlabPool *slabPool;
+ uint32_t proposedPlacement;
+ struct _WsbmListHead timeoutHead;
+ struct _WsbmListHead head;
+ struct timeval timeFreed;
+};
+
+struct _WsbmSlab {
+ struct _WsbmListHead head;
+ struct _WsbmListHead freeBuffers;
+ uint32_t numBuffers;
+ uint32_t numFree;
+ struct _WsbmSlabBuffer *buffers;
+ struct _WsbmSlabSizeHeader *header;
+ struct _WsbmSlabKernelBO *kbo;
+};
+
+
+struct _WsbmSlabSizeHeader {
+ /*
+ * Constant at creation.
+ */
+ struct _WsbmSlabPool *slabPool;
+ uint32_t bufSize;
+
+ /*
+ * Protected by this::mutex
+ */
+
+ struct _WsbmListHead slabs;
+ struct _WsbmListHead freeSlabs;
+ struct _WsbmListHead delayedBuffers;
+ uint32_t numDelayed;
+ struct _WsbmMutex mutex;
+};
+
+struct _WsbmSlabCache {
+ struct timeval slabTimeout;
+ struct timeval checkInterval;
+ struct timeval nextCheck;
+ struct _WsbmListHead timeoutList;
+ struct _WsbmListHead unCached;
+ struct _WsbmListHead cached;
+ struct _WsbmMutex mutex;
+};
+
+
+struct _WsbmSlabPool {
+ struct _WsbmBufferPool pool;
+
+ /*
+ * The data of this structure remains constant after
+ * initialization and thus needs no mutex protection.
+ */
+
+ unsigned int devOffset;
+ struct _WsbmSlabCache *cache;
+ uint32_t proposedPlacement;
+ uint32_t validMask;
+ uint32_t *bucketSizes;
+ uint32_t numBuckets;
+ uint32_t pageSize;
+ int pageAlignment;
+ int maxSlabSize;
+ int desiredNumBuffers;
+ struct _WsbmSlabSizeHeader *headers;
+};
+
+static inline struct _WsbmSlabPool *
+slabPoolFromPool(struct _WsbmBufferPool *pool)
+{
+    return containerOf(pool, struct _WsbmSlabPool, pool);
+}
+
+static inline struct _WsbmSlabPool *
+slabPoolFromBuf(struct _WsbmSlabBuffer *sBuf)
+{
+ return slabPoolFromPool(sBuf->storage.pool);
+}
+
+static inline struct _WsbmSlabBuffer *
+slabBuffer(struct _WsbmBufStorage *buf)
+{
+ return containerOf(buf, struct _WsbmSlabBuffer, storage);
+}
+
+
+/*
+ * FIXME: Perhaps arrange timeout slabs in size buckets for fast
+ * retrieval?
+ */
+
+
+static inline int
+wsbmTimeAfterEq(struct timeval *arg1, struct timeval *arg2)
+{
+ return ((arg1->tv_sec > arg2->tv_sec) ||
+ ((arg1->tv_sec == arg2->tv_sec) &&
+             (arg1->tv_usec >= arg2->tv_usec)));
+}
+
+static inline void
+wsbmTimeAdd(struct timeval *arg, struct timeval *add)
+{
+ unsigned int sec;
+
+ arg->tv_sec += add->tv_sec;
+ arg->tv_usec += add->tv_usec;
+ sec = arg->tv_usec / 1000000;
+ arg->tv_sec += sec;
+ arg->tv_usec -= sec*1000000;
+}
+
+static void
+wsbmFreeKernelBO(struct _WsbmSlabKernelBO *kbo)
+{
+ struct ttm_pl_reference_req arg;
+ struct _WsbmSlabPool *slabPool;
+
+ if (!kbo)
+ return;
+
+ slabPool = kbo->slabPool;
+ arg.handle = kbo->kBuf.handle;
+ (void) munmap(kbo->virtual, kbo->actualSize);
+ (void) drmCommandWrite(slabPool->pool.fd, slabPool->devOffset + TTM_PL_UNREF,
+ &arg, sizeof(arg));
+ free(kbo);
+}
+
+
+static void
+wsbmFreeTimeoutKBOsLocked(struct _WsbmSlabCache *cache,
+ struct timeval *time)
+{
+ struct _WsbmListHead *list, *next;
+ struct _WsbmSlabKernelBO *kbo;
+
+ if (!wsbmTimeAfterEq(time, &cache->nextCheck))
+ return;
+
+ WSBMLISTFOREACHSAFE(list, next, &cache->timeoutList) {
+ kbo = WSBMLISTENTRY(list, struct _WsbmSlabKernelBO, timeoutHead);
+
+ if (!wsbmTimeAfterEq(time, &kbo->timeFreed))
+ break;
+
+ WSBMLISTDELINIT(&kbo->timeoutHead);
+ WSBMLISTDELINIT(&kbo->head);
+ wsbmFreeKernelBO(kbo);
+ }
+
+ cache->nextCheck = *time;
+ wsbmTimeAdd(&cache->nextCheck, &cache->checkInterval);
+}
+
+
+/*
+ * Add a _SlabKernelBO to the free slab manager.
+ * This means that it is available for reuse, but if it's not
+ * reused in a while, it will be freed.
+ */
+
+static void
+wsbmSetKernelBOFree(struct _WsbmSlabCache *cache,
+ struct _WsbmSlabKernelBO *kbo)
+{
+ struct timeval time;
+ struct timeval timeFreed;
+
+ gettimeofday(&time, NULL);
+ timeFreed = time;
+ WSBM_MUTEX_LOCK(&cache->mutex);
+ wsbmTimeAdd(&timeFreed, &cache->slabTimeout);
+ kbo->timeFreed = timeFreed;
+
+ if (kbo->kBuf.placement & TTM_PL_FLAG_CACHED)
+ WSBMLISTADD(&kbo->head, &cache->cached);
+ else
+ WSBMLISTADD(&kbo->head, &cache->unCached);
+
+ WSBMLISTADDTAIL(&kbo->timeoutHead, &cache->timeoutList);
+ wsbmFreeTimeoutKBOsLocked(cache, &time);
+
+ WSBM_MUTEX_UNLOCK(&cache->mutex);
+}
+
+/*
+ * Get a _SlabKernelBO for us to use as storage for a slab.
+ */
+
+
+static struct _WsbmSlabKernelBO *
+wsbmAllocKernelBO(struct _WsbmSlabSizeHeader *header)
+
+{
+ struct _WsbmSlabPool *slabPool = header->slabPool;
+ struct _WsbmSlabCache *cache = slabPool->cache;
+ struct _WsbmListHead *list, *head;
+ uint32_t size = header->bufSize * slabPool->desiredNumBuffers;
+ struct _WsbmSlabKernelBO *kbo;
+ struct _WsbmSlabKernelBO *kboTmp;
+ int ret;
+
+ /*
+ * FIXME: We should perhaps allow some variation in slabsize in order
+ * to efficiently reuse slabs.
+ */
+
+
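+    /*
+     * Clamp the proposed slab size to maxSlabSize, make sure it still
+     * holds at least one buffer, and round up to page granularity.
+     */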
+ size = (size <= slabPool->maxSlabSize) ? size : slabPool->maxSlabSize;
+ if (size < header->bufSize)
+ size = header->bufSize;
+ size = (size + slabPool->pageSize - 1) & ~(slabPool->pageSize - 1);
+ WSBM_MUTEX_LOCK(&cache->mutex);
+
+ kbo = NULL;
+
+ retry:
+ head = (slabPool->proposedPlacement & TTM_PL_FLAG_CACHED) ?
+ &cache->cached : &cache->unCached;
+
+ WSBMLISTFOREACH(list, head) {
+ kboTmp = WSBMLISTENTRY(list, struct _WsbmSlabKernelBO, head);
+ if ((kboTmp->actualSize == size) &&
+ (slabPool->pageAlignment == 0 ||
+ (kboTmp->pageAlignment % slabPool->pageAlignment) == 0)) {
+
+ if (!kbo)
+ kbo = kboTmp;
+
+ if ((kbo->proposedPlacement ^ slabPool->proposedPlacement) == 0)
+ break;
+
+ }
+ }
+
+ if (kbo) {
+ WSBMLISTDELINIT(&kbo->head);
+ WSBMLISTDELINIT(&kbo->timeoutHead);
+ }
+
+ WSBM_MUTEX_UNLOCK(&cache->mutex);
+
+ if (kbo) {
+ uint32_t new_mask = kbo->proposedPlacement ^ slabPool->proposedPlacement;
+
+ ret = 0;
+ if (new_mask) {
+ union ttm_pl_setstatus_arg arg;
+ struct ttm_pl_setstatus_req *req = &arg.req;
+ struct ttm_pl_rep *rep = &arg.rep;
+
+ req->handle = kbo->kBuf.handle;
+ req->set_placement = slabPool->proposedPlacement & new_mask;
+ req->clr_placement = ~slabPool->proposedPlacement & new_mask;
+ DRMRESTARTCOMMANDWRITEREAD(slabPool->pool.fd,
+ slabPool->devOffset + TTM_PL_SETSTATUS,
+ arg, ret);
+ if (ret == 0) {
+ kbo->kBuf.gpuOffset = rep->gpu_offset;
+ kbo->kBuf.placement = rep->placement;
+ }
+ kbo->proposedPlacement = slabPool->proposedPlacement;
+ }
+
+ if (ret == 0)
+ return kbo;
+
+ wsbmFreeKernelBO(kbo);
+ kbo = NULL;
+ goto retry;
+ }
+
+ kbo = calloc(1, sizeof(*kbo));
+ if (!kbo)
+ return NULL;
+
+ {
+ union ttm_pl_create_arg arg;
+
+ kbo->slabPool = slabPool;
+ WSBMINITLISTHEAD(&kbo->head);
+ WSBMINITLISTHEAD(&kbo->timeoutHead);
+
+ arg.req.size = size;
+ arg.req.placement = slabPool->proposedPlacement;
+ arg.req.page_alignment = slabPool->pageAlignment;
+
+ DRMRESTARTCOMMANDWRITEREAD(slabPool->pool.fd,
+ slabPool->devOffset + TTM_PL_CREATE,
+ arg, ret);
+ if (ret)
+ goto out_err0;
+
+ kbo->kBuf.gpuOffset = arg.rep.gpu_offset;
+ kbo->kBuf.placement = arg.rep.placement;
+ kbo->kBuf.handle = arg.rep.handle;
+
+ kbo->actualSize = arg.rep.bo_size;
+ kbo->mapHandle = arg.rep.map_handle;
+ kbo->proposedPlacement = slabPool->proposedPlacement;
+ }
+
+ kbo->virtual = mmap(0, kbo->actualSize,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ slabPool->pool.fd, kbo->mapHandle);
+
+ if (kbo->virtual == MAP_FAILED) {
+ ret = -errno;
+ goto out_err1;
+ }
+
+ return kbo;
+
+ out_err1:
+ {
+ struct ttm_pl_reference_req arg =
+ {.handle = kbo->kBuf.handle};
+
+ (void) drmCommandWrite(slabPool->pool.fd,
+ slabPool->devOffset + TTM_PL_UNREF,
+ &arg, sizeof(arg));
+ }
+ out_err0:
+ free(kbo);
+ return NULL;
+}
+
+
+static int
+wsbmAllocSlab(struct _WsbmSlabSizeHeader *header)
+{
+ struct _WsbmSlab *slab;
+ struct _WsbmSlabBuffer *sBuf;
+ uint32_t numBuffers;
+ int ret;
+ int i;
+
+ slab = calloc(1, sizeof(*slab));
+ if (!slab)
+ return -ENOMEM;
+
+ slab->kbo = wsbmAllocKernelBO(header);
+ if (!slab->kbo) {
+ ret = -ENOMEM;
+ goto out_err0;
+ }
+
+ numBuffers = slab->kbo->actualSize / header->bufSize;
+
+ slab->buffers = calloc(numBuffers, sizeof(*slab->buffers));
+ if (!slab->buffers) {
+ ret = -ENOMEM;
+ goto out_err1;
+ }
+
+ WSBMINITLISTHEAD(&slab->head);
+ WSBMINITLISTHEAD(&slab->freeBuffers);
+ slab->numBuffers = numBuffers;
+ slab->numFree = 0;
+ slab->header = header;
+
+ sBuf = slab->buffers;
+ for (i=0; i < numBuffers; ++i) {
+ ret = wsbmBufStorageInit(&sBuf->storage, &header->slabPool->pool);
+ if (ret)
+ goto out_err2;
+ sBuf->parent = slab;
+ sBuf->start = i* header->bufSize;
+ sBuf->virtual = (void *) ((uint8_t *) slab->kbo->virtual +
+ sBuf->start);
+ wsbmAtomicSet(&sBuf->writers, 0);
+ sBuf->isSlabBuffer = 1;
+ WSBM_COND_INIT(&sBuf->event);
+ WSBMLISTADDTAIL(&sBuf->head, &slab->freeBuffers);
+ slab->numFree++;
+ sBuf++;
+ }
+
+ WSBMLISTADDTAIL(&slab->head, &header->slabs);
+
+ return 0;
+
+ out_err2:
+ sBuf = slab->buffers;
+ for (i=0; i < numBuffers; ++i) {
+ if (sBuf->parent == slab) {
+ WSBM_COND_FREE(&sBuf->event);
+ wsbmBufStorageTakedown(&sBuf->storage);
+ }
+ sBuf++;
+ }
+ out_err1:
+ wsbmSetKernelBOFree(header->slabPool->cache, slab->kbo);
+ free(slab->buffers);
+ out_err0:
+ free(slab);
+ return ret;
+}
+
+/*
+ * Delete a buffer from the slab header delayed list and put
+ * it on the slab free list.
+ */
+
+static void
+wsbmSlabFreeBufferLocked(struct _WsbmSlabBuffer *buf)
+{
+ struct _WsbmSlab *slab = buf->parent;
+ struct _WsbmSlabSizeHeader *header = slab->header;
+ struct _WsbmListHead *list = &buf->head;
+
+ WSBMLISTDEL(list);
+ WSBMLISTADDTAIL(list, &slab->freeBuffers);
+ slab->numFree++;
+
+ if (slab->head.next == &slab->head)
+ WSBMLISTADDTAIL(&slab->head, &header->slabs);
+
+ if (slab->numFree == slab->numBuffers) {
+ list = &slab->head;
+ WSBMLISTDEL(list);
+ WSBMLISTADDTAIL(list, &header->freeSlabs);
+ }
+
+ if (header->slabs.next == &header->slabs ||
+ slab->numFree != slab->numBuffers) {
+
+ struct _WsbmListHead *next;
+ struct _WsbmSlabCache *cache = header->slabPool->cache;
+
+ WSBMLISTFOREACHSAFE(list, next, &header->freeSlabs) {
+ int i;
+ struct _WsbmSlabBuffer *sBuf;
+
+ slab = WSBMLISTENTRY(list, struct _WsbmSlab, head);
+ WSBMLISTDELINIT(list);
+
+ sBuf = slab->buffers;
+ for (i=0; i < slab->numBuffers; ++i) {
+ if (sBuf->parent == slab) {
+ WSBM_COND_FREE(&sBuf->event);
+ wsbmBufStorageTakedown(&sBuf->storage);
+ }
+ sBuf++;
+ }
+ wsbmSetKernelBOFree(cache, slab->kbo);
+ free(slab->buffers);
+ free(slab);
+ }
+ }
+}
+
+static void
+wsbmSlabCheckFreeLocked(struct _WsbmSlabSizeHeader *header, int wait)
+{
+ struct _WsbmListHead *list, *prev, *first;
+ struct _WsbmSlabBuffer *sBuf;
+ struct _WsbmSlab *slab;
+ int firstWasSignaled = 1;
+ int signaled;
+ int i;
+ int ret;
+
+ /*
+ * Rerun the freeing test if the youngest tested buffer
+ * was signaled, since there might be more idle buffers
+ * in the delay list.
+ */
+
+ while (firstWasSignaled) {
+ firstWasSignaled = 0;
+ signaled = 0;
+ first = header->delayedBuffers.next;
+
+ /* Only examine the oldest 1/3 of delayed buffers:
+ */
+ if (header->numDelayed > 3) {
+ for (i = 0; i < header->numDelayed; i += 3) {
+ first = first->next;
+ }
+ }
+
+ /*
+ * No need to take the buffer mutex for each buffer we loop
+ * through since we're currently the only user.
+ */
+
+
+ WSBMLISTFOREACHPREVSAFE(list, prev, first->next) {
+
+ if (list == &header->delayedBuffers)
+ break;
+
+ sBuf = WSBMLISTENTRY(list, struct _WsbmSlabBuffer, head);
+ slab = sBuf->parent;
+
+ if (!signaled) {
+ if (wait) {
+ ret = wsbmFenceFinish(sBuf->fence, sBuf->fenceType, 0);
+ if (ret)
+ break;
+ signaled = 1;
+ wait = 0;
+ } else {
+ signaled = wsbmFenceSignaled(sBuf->fence, sBuf->fenceType);
+#ifdef DEBUG_FENCESIGNALED
+ fencesignaled++;
+#endif
+ }
+ if (signaled) {
+ if (list == first)
+ firstWasSignaled = 1;
+ wsbmFenceUnreference(&sBuf->fence);
+ header->numDelayed--;
+ wsbmSlabFreeBufferLocked(sBuf);
+ } else
+ break;
+ } else if (wsbmFenceSignaledCached(sBuf->fence, sBuf->fenceType)) {
+ wsbmFenceUnreference(&sBuf->fence);
+ header->numDelayed--;
+ wsbmSlabFreeBufferLocked(sBuf);
+ }
+ }
+ }
+}
+
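+/*
+ * Note on the scan above: the walk starts at the boundary of the oldest
+ * third of the delayed list and proceeds towards the oldest entry. Once
+ * one fence is found signaled, the remaining (older) buffers are checked
+ * with the cached signal test only, avoiding a kernel round trip per
+ * buffer.
+ */
+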
+
+static struct _WsbmSlabBuffer *
+wsbmSlabAllocBuffer(struct _WsbmSlabSizeHeader *header)
+{
+    struct _WsbmSlabBuffer *buf;
+ struct _WsbmSlab *slab;
+ struct _WsbmListHead *list;
+ int count = WSBM_SLABPOOL_ALLOC_RETRIES;
+
+ WSBM_MUTEX_LOCK(&header->mutex);
+ while(header->slabs.next == &header->slabs && count > 0) {
+ wsbmSlabCheckFreeLocked(header, 0);
+ if (header->slabs.next != &header->slabs)
+ break;
+
+ WSBM_MUTEX_UNLOCK(&header->mutex);
+ if (count != WSBM_SLABPOOL_ALLOC_RETRIES)
+ usleep(1000);
+ WSBM_MUTEX_LOCK(&header->mutex);
+ (void) wsbmAllocSlab(header);
+ count--;
+ }
+
+ list = header->slabs.next;
+ if (list == &header->slabs) {
+ WSBM_MUTEX_UNLOCK(&header->mutex);
+ return NULL;
+ }
+ slab = WSBMLISTENTRY(list, struct _WsbmSlab, head);
+ if (--slab->numFree == 0)
+ WSBMLISTDELINIT(list);
+
+ list = slab->freeBuffers.next;
+ WSBMLISTDELINIT(list);
+
+ WSBM_MUTEX_UNLOCK(&header->mutex);
+ buf = WSBMLISTENTRY(list, struct _WsbmSlabBuffer, head);
+
+#ifdef DEBUG_FENCESIGNALED
+ createbuffer++;
+#endif
+ return buf;
+}
+
+static struct _WsbmBufStorage *
+pool_create(struct _WsbmBufferPool *pool, unsigned long size,
+ uint32_t placement, unsigned alignment)
+{
+ struct _WsbmSlabPool *slabPool = slabPoolFromPool(pool);
+ struct _WsbmSlabSizeHeader *header;
+ struct _WsbmSlabBuffer *sBuf;
+ int i;
+ int ret;
+
+ /*
+ * FIXME: Check for compatibility.
+ */
+
+ header = slabPool->headers;
+ for (i=0; i<slabPool->numBuckets; ++i) {
+ if (header->bufSize >= size)
+ break;
+ header++;
+ }
+
+ if (i < slabPool->numBuckets) {
+ sBuf = wsbmSlabAllocBuffer(header);
+ return ((sBuf) ? &sBuf->storage : NULL);
+ }
+
+
+ /*
+ * Fall back to allocate a buffer object directly from DRM.
+ * and wrap it in a wsbmBO structure.
+ */
+
+ sBuf = calloc(1, sizeof(*sBuf));
+
+ if (!sBuf)
+ return NULL;
+
+ if (alignment) {
+ if ((alignment < slabPool->pageSize) && (slabPool->pageSize % alignment))
+ goto out_err0;
+ if ((alignment > slabPool->pageSize) && (alignment % slabPool->pageSize))
+ goto out_err0;
+ }
+
+ ret = wsbmBufStorageInit(&sBuf->storage, pool);
+ if (ret)
+ goto out_err0;
+
+ ret = WSBM_COND_INIT(&sBuf->event);
+ if (ret)
+ goto out_err1;
+
+ {
+ union ttm_pl_create_arg arg;
+
+ arg.req.size = size;
+ arg.req.placement = placement;
+ arg.req.page_alignment = alignment / slabPool->pageSize;
+
+ DRMRESTARTCOMMANDWRITEREAD(pool->fd,
+ slabPool->devOffset + TTM_PL_CREATE,
+ arg, ret);
+
+ if (ret)
+ goto out_err2;
+
+ sBuf->kBuf.gpuOffset = arg.rep.gpu_offset;
+ sBuf->kBuf.placement = arg.rep.placement;
+ sBuf->kBuf.handle = arg.rep.handle;
+ sBuf->mapHandle = arg.rep.map_handle;
+ sBuf->requestedSize = size;
+
+ sBuf->virtual = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ pool->fd, sBuf->mapHandle);
+
+ if (sBuf->virtual == MAP_FAILED)
+ goto out_err3;
+ }
+
+ wsbmAtomicSet(&sBuf->writers, 0);
+ return &sBuf->storage;
+ out_err3:
+ {
+ struct ttm_pl_reference_req arg;
+
+ arg.handle = sBuf->kBuf.handle;
+ (void) drmCommandWriteRead(pool->fd,
+ slabPool->devOffset + TTM_PL_UNREF,
+ &arg, sizeof(arg));
+ }
+ out_err2:
+ WSBM_COND_FREE(&sBuf->event);
+ out_err1:
+ wsbmBufStorageTakedown(&sBuf->storage);
+ out_err0:
+ free(sBuf);
+ return NULL;
+}
+
+static void
+pool_destroy(struct _WsbmBufStorage **p_buf)
+{
+ struct _WsbmBufStorage *buf = *p_buf;
+ struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
+ struct _WsbmSlab *slab;
+ struct _WsbmSlabSizeHeader *header;
+
+ *p_buf = NULL;
+
+ if (!sBuf->isSlabBuffer) {
+ struct _WsbmSlabPool *slabPool = slabPoolFromBuf(sBuf);
+ struct ttm_pl_reference_req arg;
+
+ if (sBuf->virtual != NULL) {
+ (void) munmap(sBuf->virtual, sBuf->requestedSize);
+ sBuf->virtual = NULL;
+ }
+
+ arg.handle = sBuf->kBuf.handle;
+ (void) drmCommandWrite(slabPool->pool.fd,
+ slabPool->devOffset + TTM_PL_UNREF,
+ &arg, sizeof(arg));
+
+ WSBM_COND_FREE(&sBuf->event);
+ wsbmBufStorageTakedown(&sBuf->storage);
+ free(sBuf);
+ return;
+ }
+
+ slab = sBuf->parent;
+ header = slab->header;
+
+ /*
+ * No need to take the buffer mutex below since we're the only user.
+ */
+
+ WSBM_MUTEX_LOCK(&header->mutex);
+ sBuf->unFenced = 0;
+ wsbmAtomicSet(&sBuf->writers, 0);
+ wsbmAtomicSet(&sBuf->storage.refCount, 1);
+
+ if (sBuf->fence && !wsbmFenceSignaledCached(sBuf->fence, sBuf->fenceType)) {
+ WSBMLISTADDTAIL(&sBuf->head, &header->delayedBuffers);
+ header->numDelayed++;
+ } else {
+ if (sBuf->fence)
+ wsbmFenceUnreference(&sBuf->fence);
+ wsbmSlabFreeBufferLocked(sBuf);
+ }
+
+ WSBM_MUTEX_UNLOCK(&header->mutex);
+}
+
+
+static void
+waitIdleLocked(struct _WsbmSlabBuffer *sBuf, int lazy)
+{
+ struct _WsbmBufStorage *storage = &sBuf->storage;
+
+ while(sBuf->unFenced || sBuf->fence != NULL) {
+
+ if (sBuf->unFenced)
+ WSBM_COND_WAIT(&sBuf->event, &storage->mutex);
+
+ if (sBuf->fence != NULL) {
+ if (!wsbmFenceSignaled(sBuf->fence, sBuf->fenceType)) {
+ struct _WsbmFenceObject *fence =
+ wsbmFenceReference(sBuf->fence);
+
+ WSBM_MUTEX_UNLOCK(&storage->mutex);
+ (void) wsbmFenceFinish(fence, sBuf->fenceType, lazy);
+ WSBM_MUTEX_LOCK(&storage->mutex);
+ if (sBuf->fence == fence)
+ wsbmFenceUnreference(&sBuf->fence);
+
+ wsbmFenceUnreference(&fence);
+ } else {
+ wsbmFenceUnreference(&sBuf->fence);
+ }
+ }
+ }
+}
+
+static int
+pool_waitIdle(struct _WsbmBufStorage *buf, int lazy)
+{
+ struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
+
+ WSBM_MUTEX_LOCK(&buf->mutex);
+ waitIdleLocked(sBuf, lazy);
+ WSBM_MUTEX_UNLOCK(&buf->mutex);
+
+ return 0;
+}
+
+static int
+pool_map(struct _WsbmBufStorage *buf, unsigned mode, void **virtual)
+{
+ struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
+
+ *virtual = sBuf->virtual;
+
+ return 0;
+}
+
+static void
+pool_releaseFromCpu(struct _WsbmBufStorage *buf, unsigned mode)
+{
+ struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
+
+ if (wsbmAtomicDecZero(&sBuf->writers))
+ WSBM_COND_BROADCAST(&sBuf->event);
+}
+
+static int
+pool_syncForCpu(struct _WsbmBufStorage *buf, unsigned mode)
+{
+ struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
+ int ret = 0;
+
+ WSBM_MUTEX_LOCK(&buf->mutex);
+ if ((mode & WSBM_SYNCCPU_DONT_BLOCK)) {
+ int signaled;
+
+ if (sBuf->unFenced) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ if (sBuf->isSlabBuffer)
+ signaled = (sBuf->fence == NULL) ||
+ wsbmFenceSignaledCached(sBuf->fence, sBuf->fenceType);
+ else
+ signaled = (sBuf->fence == NULL) ||
+ wsbmFenceSignaled(sBuf->fence, sBuf->fenceType);
+
+ ret = 0;
+ if (signaled) {
+ wsbmFenceUnreference(&sBuf->fence);
+ wsbmAtomicInc(&sBuf->writers);
+ } else
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ waitIdleLocked(sBuf, 0);
+ wsbmAtomicInc(&sBuf->writers);
+ out_unlock:
+ WSBM_MUTEX_UNLOCK(&buf->mutex);
+ return ret;
+}
+
+static void
+pool_unmap(struct _WsbmBufStorage *buf)
+{
+    (void) buf;
+}
+
+static unsigned long
+pool_poolOffset(struct _WsbmBufStorage *buf)
+{
+ struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
+
+ return sBuf->start;
+}
+
+static unsigned long
+pool_size(struct _WsbmBufStorage *buf)
+{
+ struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
+ if (!sBuf->isSlabBuffer)
+ return sBuf->requestedSize;
+
+ return sBuf->parent->header->bufSize;
+}
+
+static struct _WsbmKernelBuf *
+pool_kernel(struct _WsbmBufStorage *buf)
+{
+ struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
+
+ return (sBuf->isSlabBuffer) ? &sBuf->parent->kbo->kBuf : &sBuf->kBuf;
+}
+
+
+static unsigned long
+pool_offset(struct _WsbmBufStorage *buf)
+{
+ return pool_kernel(buf)->gpuOffset + pool_poolOffset(buf);
+}
+
+
+static void
+pool_fence(struct _WsbmBufStorage *buf, struct _WsbmFenceObject *fence)
+{
+ struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
+ struct _WsbmKernelBuf *kBuf;
+
+ WSBM_MUTEX_LOCK(&buf->mutex);
+ if (sBuf->fence)
+ wsbmFenceUnreference(&sBuf->fence);
+
+ sBuf->fence = wsbmFenceReference(fence);
+ kBuf = pool_kernel(buf);
+ sBuf->fenceType = kBuf->fence_type_mask;
+ sBuf->unFenced = 0;
+ WSBM_COND_BROADCAST(&sBuf->event);
+ WSBM_MUTEX_UNLOCK(&buf->mutex);
+}
+
+static int
+pool_validate(struct _WsbmBufStorage *buf,
+ uint64_t set_flags,
+ uint64_t clr_flags)
+{
+ struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
+
+ WSBM_MUTEX_LOCK(&buf->mutex);
+ while(wsbmAtomicRead(&sBuf->writers) != 0) {
+ WSBM_COND_WAIT(&sBuf->event, &buf->mutex);
+ }
+
+ sBuf->unFenced = 1;
+ WSBM_MUTEX_UNLOCK(&buf->mutex);
+ return 0;
+}
+
+static void
+pool_unvalidate(struct _WsbmBufStorage *buf)
+{
+ struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
+
+ WSBM_MUTEX_LOCK(&buf->mutex);
+ if (sBuf->unFenced) {
+ sBuf->unFenced = 0;
+ WSBM_COND_BROADCAST(&sBuf->event);
+ }
+ WSBM_MUTEX_UNLOCK(&buf->mutex);
+}
+
+struct _WsbmSlabCache *
+wsbmSlabCacheInit(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec)
+{
+ struct _WsbmSlabCache *tmp;
+
+ tmp = calloc(1, sizeof(*tmp));
+ if (!tmp)
+ return NULL;
+
+ WSBM_MUTEX_INIT(&tmp->mutex);
+ WSBM_MUTEX_LOCK(&tmp->mutex);
+ tmp->slabTimeout.tv_usec = slabTimeoutMsec*1000;
+ tmp->slabTimeout.tv_sec = tmp->slabTimeout.tv_usec / 1000000;
+ tmp->slabTimeout.tv_usec -= tmp->slabTimeout.tv_sec*1000000;
+
+ tmp->checkInterval.tv_usec = checkIntervalMsec*1000;
+ tmp->checkInterval.tv_sec = tmp->checkInterval.tv_usec / 1000000;
+ tmp->checkInterval.tv_usec -= tmp->checkInterval.tv_sec*1000000;
+
+ gettimeofday(&tmp->nextCheck, NULL);
+ wsbmTimeAdd(&tmp->nextCheck, &tmp->checkInterval);
+ WSBMINITLISTHEAD(&tmp->timeoutList);
+ WSBMINITLISTHEAD(&tmp->unCached);
+ WSBMINITLISTHEAD(&tmp->cached);
+ WSBM_MUTEX_UNLOCK(&tmp->mutex);
+
+ return tmp;
+}
+
+void
+wsbmSlabCacheFinish(struct _WsbmSlabCache *cache)
+{
+ struct timeval time;
+
+ time = cache->nextCheck;
+ WSBM_MUTEX_LOCK(&cache->mutex);
+ wsbmTimeAdd(&time, &cache->checkInterval);
+ wsbmFreeTimeoutKBOsLocked(cache, &time);
+ WSBM_MUTEX_UNLOCK(&cache->mutex);
+
+ assert(cache->timeoutList.next == &cache->timeoutList);
+ assert(cache->unCached.next == &cache->unCached);
+ assert(cache->cached.next == &cache->cached);
+
+ WSBM_MUTEX_FREE(&cache->mutex);
+ free(cache);
+}
+
+static void
+wsbmInitSizeHeader(struct _WsbmSlabPool *slabPool, uint32_t size,
+ struct _WsbmSlabSizeHeader *header)
+{
+ WSBM_MUTEX_INIT(&header->mutex);
+ WSBM_MUTEX_LOCK(&header->mutex);
+
+ WSBMINITLISTHEAD(&header->slabs);
+ WSBMINITLISTHEAD(&header->freeSlabs);
+ WSBMINITLISTHEAD(&header->delayedBuffers);
+
+ header->numDelayed = 0;
+ header->slabPool = slabPool;
+ header->bufSize = size;
+
+ WSBM_MUTEX_UNLOCK(&header->mutex);
+}
+
+static void
+wsbmFinishSizeHeader(struct _WsbmSlabSizeHeader *header)
+{
+ struct _WsbmListHead *list, *next;
+ struct _WsbmSlabBuffer *sBuf;
+
+ WSBM_MUTEX_LOCK(&header->mutex);
+ WSBMLISTFOREACHSAFE(list, next, &header->delayedBuffers) {
+ sBuf = WSBMLISTENTRY(list, struct _WsbmSlabBuffer, head);
+ if (sBuf->fence) {
+ (void) wsbmFenceFinish(sBuf->fence, sBuf->fenceType, 0);
+ wsbmFenceUnreference(&sBuf->fence);
+ }
+ header->numDelayed--;
+ wsbmSlabFreeBufferLocked(sBuf);
+ }
+ WSBM_MUTEX_UNLOCK(&header->mutex);
+ WSBM_MUTEX_FREE(&header->mutex);
+}
+
+
+static void
+pool_takedown(struct _WsbmBufferPool *pool)
+{
+ struct _WsbmSlabPool *slabPool = slabPoolFromPool(pool);
+ int i;
+
+ for (i=0; i<slabPool->numBuckets; ++i) {
+ wsbmFinishSizeHeader(&slabPool->headers[i]);
+ }
+
+ free(slabPool->headers);
+ free(slabPool->bucketSizes);
+ free(slabPool);
+}
+
+struct _WsbmBufferPool *
+wsbmSlabPoolInit(int fd,
+ uint32_t devOffset,
+ uint32_t placement,
+ uint32_t validMask,
+ uint32_t smallestSize,
+ uint32_t numSizes,
+ uint32_t desiredNumBuffers,
+ uint32_t maxSlabSize,
+ uint32_t pageAlignment,
+ struct _WsbmSlabCache *cache)
+{
+ struct _WsbmBufferPool *pool;
+ struct _WsbmSlabPool *slabPool;
+ uint32_t i;
+
+ slabPool = calloc(1, sizeof(*slabPool));
+ if (!slabPool)
+ return NULL;
+
+ pool = &slabPool->pool;
+
+ slabPool->bucketSizes = calloc(numSizes, sizeof(*slabPool->bucketSizes));
+ if (!slabPool->bucketSizes)
+ goto out_err0;
+
+ slabPool->headers = calloc(numSizes, sizeof(*slabPool->headers));
+ if (!slabPool->headers)
+ goto out_err1;
+
+ slabPool->devOffset = devOffset;
+ slabPool->cache = cache;
+ slabPool->proposedPlacement = placement;
+ slabPool->validMask = validMask;
+ slabPool->numBuckets = numSizes;
+ slabPool->pageSize = getpagesize();
+ slabPool->pageAlignment = pageAlignment;
+ slabPool->maxSlabSize = maxSlabSize;
+ slabPool->desiredNumBuffers = desiredNumBuffers;
+
+ for (i=0; i<slabPool->numBuckets; ++i) {
+ slabPool->bucketSizes[i] = (smallestSize << i);
+ wsbmInitSizeHeader(slabPool, slabPool->bucketSizes[i],
+ &slabPool->headers[i]);
+ }
+
+ pool->fd = fd;
+ pool->map = &pool_map;
+ pool->unmap = &pool_unmap;
+ pool->destroy = &pool_destroy;
+ pool->offset = &pool_offset;
+ pool->poolOffset = &pool_poolOffset;
+ pool->size = &pool_size;
+ pool->create = &pool_create;
+ pool->fence = &pool_fence;
+ pool->kernel = &pool_kernel;
+ pool->validate = &pool_validate;
+ pool->unvalidate = &pool_unvalidate;
+ pool->waitIdle = &pool_waitIdle;
+ pool->takeDown = &pool_takedown;
+ pool->releasefromcpu = &pool_releaseFromCpu;
+ pool->syncforcpu = &pool_syncForCpu;
+
+ return pool;
+
+ out_err1:
+ free(slabPool->bucketSizes);
+ out_err0:
+ free(slabPool);
+
+ return NULL;
+}
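+
+/*
+ * Illustrative usage sketch, not part of the library. The devOffset
+ * value and all sizes below are made-up assumptions; a driver picks
+ * its own:
+ *
+ *   struct _WsbmSlabCache *cache;
+ *   struct _WsbmBufferPool *pool;
+ *
+ *   cache = wsbmSlabCacheInit(100, 1000);  (check every 100 ms,
+ *                                           keep idle slabs for 1 s)
+ *   pool = wsbmSlabPoolInit(fd, 0x100,
+ *                           TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED,
+ *                           TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED,
+ *                           4096,     (smallest bucket: 4 KiB)
+ *                           4,        (buckets: 4k, 8k, 16k, 32k)
+ *                           16,       (aim for 16 buffers per slab)
+ *                           1 << 20,  (cap slabs at 1 MiB)
+ *                           0, cache);
+ *   ...
+ *   pool->takeDown(pool);
+ *   wsbmSlabCacheFinish(cache);
+ */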
diff --git a/src/wsbm_slabpool_new.c b/src/wsbm_slabpool_new.c
new file mode 100644
index 0000000..92ffb17
--- /dev/null
+++ b/src/wsbm_slabpool_new.c
@@ -0,0 +1,1213 @@
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <ttm/ttm_placement_user.h>
+#include <stdint.h>
+#include <sys/time.h>
+#include <errno.h>
+#include <unistd.h>
+#include <assert.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <xf86drm.h>
+#include "wsbm_pool.h"
+#include "wsbm_fencemgr.h"
+#include "wsbm_priv.h"
+#include "wsbm_manager.h"
+#include <stdio.h>
+
+
+#define WSBM_SLABPOOL_ALLOC_RETRIES 100
+#define DRMRESTARTCOMMANDWRITE(_fd, _val, _arg, _ret) \
+ do { \
+ (_ret) = drmCommandWrite(_fd, _val, &(_arg), sizeof(_arg)); \
+ } while ((_ret) == -EAGAIN || (_ret) == -ERESTART);
+
+#define DRMRESTARTCOMMANDWRITEREAD(_fd, _val, _arg, _ret) \
+ do { \
+ (_ret) = drmCommandWriteRead(_fd, _val, &(_arg), sizeof(_arg)); \
+ } while ((_ret) == -EAGAIN || (_ret) == -ERESTART);
+
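+/*
+ * These wrappers retry the ioctl for as long as the kernel reports
+ * -EAGAIN or -ERESTART, that is, whenever the call was interrupted by
+ * a signal before the command could complete.
+ */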
+
+#ifdef DEBUG_FENCESIGNALED
+static int createbuffer = 0;
+static int fencesignaled = 0;
+#endif
+
+struct _WsbmSlab;
+
+struct _WsbmSlabBuffer {
+ struct _WsbmKernelBuf kBuf;
+ struct _WsbmBufStorage storage;
+ struct _WsbmCond event;
+
+ /*
+ * Remains constant after creation.
+ */
+
+ int isSlabBuffer;
+ struct _WsbmSlab *parent;
+ uint32_t start;
+ void *virtual;
+ unsigned long requestedSize;
+ uint64_t mapHandle;
+
+ /*
+ * Protected by struct _WsbmSlabSizeHeader::mutex
+ */
+
+ struct _WsbmListHead head;
+
+ /*
+ * Protected by this::mutex
+ */
+
+ struct _WsbmFenceObject *fence;
+ uint32_t fenceType;
+ struct _WsbmAtomic writers; /* (Only upping) */
+ int unFenced;
+};
+
+struct _WsbmSlabPool;
+struct _WsbmSlabKernelBO {
+
+ /*
+ * Constant at creation
+ */
+
+ struct _WsbmKernelBuf kBuf;
+ uint32_t pageAlignment;
+ void *virtual;
+ unsigned long actualSize;
+ uint64_t mapHandle;
+
+ /*
+ * Protected by struct _WsbmSlabCache::mutex
+ */
+
+ struct _WsbmSlabPool *slabPool;
+ uint32_t proposedPlacement;
+ struct _WsbmListHead timeoutHead;
+ struct _WsbmListHead head;
+ struct timeval timeFreed;
+};
+
+struct _WsbmSlab{
+ struct _WsbmListHead head;
+ struct _WsbmListHead freeBuffers;
+ uint32_t numBuffers;
+ uint32_t numFree;
+ struct _WsbmSlabBuffer *buffers;
+ struct _WsbmSlabSizeHeader *header;
+ struct _WsbmSlabKernelBO *kbo;
+};
+
+
+struct _WsbmSlabSizeHeader {
+ /*
+ * Constant at creation.
+ */
+ struct _WsbmSlabPool *slabPool;
+ uint32_t bufSize;
+
+ /*
+ * Protected by this::mutex
+ */
+
+ struct _WsbmListHead slabs;
+ struct _WsbmListHead freeSlabs;
+ struct _WsbmListHead delayedBuffers;
+ uint32_t numDelayed;
+ struct _WsbmMutex mutex;
+};
+
+struct _WsbmSlabCache {
+ struct timeval slabTimeout;
+ struct timeval checkInterval;
+ struct timeval nextCheck;
+ struct _WsbmListHead timeoutList;
+ struct _WsbmListHead unCached;
+ struct _WsbmListHead cached;
+ struct _WsbmMutex mutex;
+};
+
+
+struct _WsbmSlabPool {
+ struct _WsbmBufferPool pool;
+
+ /*
+ * The data of this structure remains constant after
+ * initialization and thus needs no mutex protection.
+ */
+
+ unsigned int devOffset;
+ struct _WsbmSlabCache *cache;
+ uint32_t proposedPlacement;
+ uint32_t validMask;
+ uint32_t *bucketSizes;
+ uint32_t numBuckets;
+ uint32_t pageSize;
+ int pageAlignment;
+ int maxSlabSize;
+ int desiredNumBuffers;
+ struct _WsbmSlabSizeHeader *headers;
+};
+
+static inline struct _WsbmSlabPool *
+slabPoolFromPool(struct _WsbmBufferPool *pool)
+{
+ return containerOf(pool, struct _WsbmSlabPool , pool);
+}
+
+static inline struct _WsbmSlabPool *
+slabPoolFromBuf(struct _WsbmSlabBuffer *sBuf)
+{
+ return slabPoolFromPool(sBuf->storage.pool);
+}
+
+static inline struct _WsbmSlabBuffer *
+slabBuffer(struct _WsbmBufStorage *buf)
+{
+ return containerOf(buf, struct _WsbmSlabBuffer, storage);
+}
+
+
+/*
+ * FIXME: Perhaps arrange timeout slabs in size buckets for fast
+ * retrieval?
+ */
+
+
+static inline int
+wsbmTimeAfterEq(struct timeval *arg1, struct timeval *arg2)
+{
+ return ((arg1->tv_sec > arg2->tv_sec) ||
+ ((arg1->tv_sec == arg2->tv_sec) &&
+ (arg1->tv_usec > arg2->tv_usec)));
+}
+
+static inline void
+wsbmTimeAdd(struct timeval *arg, struct timeval *add)
+{
+ unsigned int sec;
+
+ arg->tv_sec += add->tv_sec;
+ arg->tv_usec += add->tv_usec;
+ sec = arg->tv_usec / 1000000;
+ arg->tv_sec += sec;
+ arg->tv_usec -= sec*1000000;
+}
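+
+/*
+ * Worked example: adding {1 s, 900000 us} and {0 s, 200000 us} first
+ * gives {1 s, 1100000 us}; the carry step above then normalizes the
+ * result to {2 s, 100000 us}.
+ */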
+
+static void
+wsbmFreeKernelBO(struct _WsbmSlabKernelBO *kbo)
+{
+ struct ttm_pl_reference_req arg;
+ struct _WsbmSlabPool *slabPool;
+
+ if (!kbo)
+ return;
+
+ fprintf(stderr, "Free kbo size %d\n", kbo->actualSize);
+ slabPool = kbo->slabPool;
+ arg.handle = kbo->kBuf.handle;
+ (void) munmap(kbo->virtual, kbo->actualSize);
+ (void) drmCommandWrite(slabPool->pool.fd, slabPool->devOffset + TTM_PL_UNREF,
+ &arg, sizeof(arg));
+ free(kbo);
+}
+
+
+static void
+wsbmFreeTimeoutKBOsLocked(struct _WsbmSlabCache *cache,
+ struct timeval *time)
+{
+ struct _WsbmListHead *list, *next;
+ struct _WsbmSlabKernelBO *kbo;
+
+ if (!wsbmTimeAfterEq(time, &cache->nextCheck))
+ return;
+
+ WSBMLISTFOREACHSAFE(list, next, &cache->timeoutList) {
+ kbo = WSBMLISTENTRY(list, struct _WsbmSlabKernelBO, timeoutHead);
+
+ if (!wsbmTimeAfterEq(time, &kbo->timeFreed))
+ break;
+
+ WSBMLISTDELINIT(&kbo->timeoutHead);
+ WSBMLISTDELINIT(&kbo->head);
+ wsbmFreeKernelBO(kbo);
+ }
+
+ cache->nextCheck = *time;
+ wsbmTimeAdd(&cache->nextCheck, &cache->checkInterval);
+}
+
+
+/*
+ * Add a _SlabKernelBO to the free slab manager.
+ * This means that it is available for reuse, but if it's not
+ * reused in a while, it will be freed.
+ */
+
+static void
+wsbmSetKernelBOFree(struct _WsbmSlabCache *cache,
+ struct _WsbmSlabKernelBO *kbo)
+{
+ struct timeval time;
+ struct timeval timeFreed;
+
+ gettimeofday(&time, NULL);
+ timeFreed = time;
+ WSBM_MUTEX_LOCK(&cache->mutex);
+ wsbmTimeAdd(&timeFreed, &cache->slabTimeout);
+ kbo->timeFreed = timeFreed;
+
+ if (kbo->kBuf.placement & TTM_PL_FLAG_CACHED)
+ WSBMLISTADD(&kbo->head, &cache->cached);
+ else
+ WSBMLISTADD(&kbo->head, &cache->unCached);
+
+ WSBMLISTADDTAIL(&kbo->timeoutHead, &cache->timeoutList);
+ wsbmFreeTimeoutKBOsLocked(cache, &time);
+
+ WSBM_MUTEX_UNLOCK(&cache->mutex);
+}
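+
+/*
+ * Lifecycle note: a kbo parked here stays on the cached / unCached
+ * list and on the timeout list. wsbmAllocKernelBO() below may pick it
+ * up again if size and alignment match; otherwise
+ * wsbmFreeTimeoutKBOsLocked() reaps it once its timeFreed stamp
+ * (time of freeing plus slabTimeout) has passed.
+ */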
+
+/*
+ * Get a _SlabKernelBO for us to use as storage for a slab.
+ */
+
+
+static struct _WsbmSlabKernelBO *
+wsbmAllocKernelBO(struct _WsbmSlabSizeHeader *header)
+
+{
+ struct _WsbmSlabPool *slabPool = header->slabPool;
+ struct _WsbmSlabCache *cache = slabPool->cache;
+ struct _WsbmListHead *list, *head;
+ uint32_t size = header->bufSize * slabPool->desiredNumBuffers;
+ struct _WsbmSlabKernelBO *kbo;
+ struct _WsbmSlabKernelBO *kboTmp;
+ int ret;
+
+ /*
+ * FIXME: We should perhaps allow some variation in slabsize in order
+ * to efficiently reuse slabs.
+ */
+
+
+ size = (size <= slabPool->maxSlabSize) ? size : slabPool->maxSlabSize;
+ if (size < header->bufSize)
+ size = header->bufSize;
+ size = (size + slabPool->pageSize - 1) & ~(slabPool->pageSize - 1);
+ WSBM_MUTEX_LOCK(&cache->mutex);
+
+ kbo = NULL;
+
+ retry:
+ head = (slabPool->proposedPlacement & TTM_PL_FLAG_CACHED) ?
+ &cache->cached : &cache->unCached;
+
+ WSBMLISTFOREACH(list, head) {
+ kboTmp = WSBMLISTENTRY(list, struct _WsbmSlabKernelBO, head);
+ if ((kboTmp->actualSize == size) &&
+ (slabPool->pageAlignment == 0 ||
+ (kboTmp->pageAlignment % slabPool->pageAlignment) == 0)) {
+
+ if (!kbo)
+ kbo = kboTmp;
+
+ if ((kbo->proposedPlacement ^ slabPool->proposedPlacement) == 0)
+ break;
+
+ }
+ }
+
+ if (kbo) {
+ WSBMLISTDELINIT(&kbo->head);
+ WSBMLISTDELINIT(&kbo->timeoutHead);
+ }
+
+ WSBM_MUTEX_UNLOCK(&cache->mutex);
+
+ if (kbo) {
+ uint32_t new_mask = kbo->proposedPlacement ^ slabPool->proposedPlacement;
+
+ ret = 0;
+ if (new_mask) {
+ union ttm_pl_setstatus_arg arg;
+ struct ttm_pl_setstatus_req *req = &arg.req;
+ struct ttm_pl_rep *rep = &arg.rep;
+
+ req->handle = kbo->kBuf.handle;
+ req->set_placement = slabPool->proposedPlacement & new_mask;
+ req->clr_placement = ~slabPool->proposedPlacement & new_mask;
+ DRMRESTARTCOMMANDWRITEREAD(slabPool->pool.fd,
+ slabPool->devOffset + TTM_PL_SETSTATUS,
+ arg, ret);
+ if (ret == 0) {
+ kbo->kBuf.gpuOffset = rep->gpu_offset;
+ kbo->kBuf.placement = rep->placement;
+ }
+ kbo->proposedPlacement = slabPool->proposedPlacement;
+ }
+
+ if (ret == 0)
+ return kbo;
+
+ wsbmFreeKernelBO(kbo);
+ kbo = NULL;
+ goto retry;
+ }
+
+ fprintf(stderr, "New kbo 0x%08x size %d\n",
+ slabPool->proposedPlacement, size);
+ kbo = calloc(1, sizeof(*kbo));
+ if (!kbo)
+ return NULL;
+
+ {
+ union ttm_pl_create_arg arg;
+
+ kbo->slabPool = slabPool;
+ WSBMINITLISTHEAD(&kbo->head);
+ WSBMINITLISTHEAD(&kbo->timeoutHead);
+
+ arg.req.size = size;
+ arg.req.placement = slabPool->proposedPlacement;
+ arg.req.page_alignment = slabPool->pageAlignment;
+
+ DRMRESTARTCOMMANDWRITEREAD(slabPool->pool.fd,
+ slabPool->devOffset + TTM_PL_CREATE,
+ arg, ret);
+ if (ret)
+ goto out_err0;
+
+ kbo->kBuf.gpuOffset = arg.rep.gpu_offset;
+ kbo->kBuf.placement = arg.rep.placement;
+ kbo->kBuf.handle = arg.rep.handle;
+
+ kbo->actualSize = arg.rep.bo_size;
+ kbo->mapHandle = arg.rep.map_handle;
+ kbo->proposedPlacement = slabPool->proposedPlacement;
+ }
+
+ kbo->virtual = mmap(0, kbo->actualSize,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ slabPool->pool.fd, kbo->mapHandle);
+
+ if (kbo->virtual == MAP_FAILED) {
+ ret = -errno;
+ goto out_err1;
+ }
+
+ return kbo;
+
+ out_err1:
+ {
+ struct ttm_pl_reference_req arg =
+ {.handle = kbo->kBuf.handle};
+
+ (void) drmCommandWrite(slabPool->pool.fd,
+ slabPool->devOffset + TTM_PL_UNREF,
+ &arg, sizeof(arg));
+ }
+ out_err0:
+ free(kbo);
+ return NULL;
+}
+
+
+static int
+wsbmAllocSlab(struct _WsbmSlabSizeHeader *header)
+{
+ struct _WsbmSlab *slab;
+ struct _WsbmSlabBuffer *sBuf;
+ uint32_t numBuffers;
+ int ret;
+ int i;
+
+ slab = calloc(1, sizeof(*slab));
+ if (!slab)
+ return -ENOMEM;
+
+ slab->kbo = wsbmAllocKernelBO(header);
+ if (!slab->kbo) {
+ ret = -ENOMEM;
+ goto out_err0;
+ }
+
+ numBuffers = slab->kbo->actualSize / header->bufSize;
+
+ slab->buffers = calloc(numBuffers, sizeof(*slab->buffers));
+ if (!slab->buffers) {
+ ret = -ENOMEM;
+ goto out_err1;
+ }
+
+ WSBMINITLISTHEAD(&slab->head);
+ WSBMINITLISTHEAD(&slab->freeBuffers);
+ slab->numBuffers = numBuffers;
+ slab->numFree = 0;
+ slab->header = header;
+
+ sBuf = slab->buffers;
+ for (i=0; i < numBuffers; ++i) {
+ ret = wsbmBufStorageInit(&sBuf->storage, &header->slabPool->pool);
+ if (ret)
+ goto out_err2;
+ sBuf->parent = slab;
+ sBuf->start = i* header->bufSize;
+ sBuf->virtual = (void *) ((uint8_t *) slab->kbo->virtual +
+ sBuf->start);
+ wsbmAtomicSet(&sBuf->writers, 0);
+ sBuf->isSlabBuffer = 1;
+ WSBM_COND_INIT(&sBuf->event);
+ WSBMLISTADDTAIL(&sBuf->head, &slab->freeBuffers);
+ slab->numFree++;
+ sBuf++;
+ }
+
+ WSBMLISTADDTAIL(&slab->head, &header->slabs);
+
+ return 0;
+
+ out_err2:
+ sBuf = slab->buffers;
+ for (i=0; i < numBuffers; ++i) {
+ if (sBuf->parent == slab) {
+ WSBM_COND_FREE(&sBuf->event);
+ wsbmBufStorageTakedown(&sBuf->storage);
+ }
+ sBuf++;
+ }
+ out_err1:
+ wsbmSetKernelBOFree(header->slabPool->cache, slab->kbo);
+ free(slab->buffers);
+ out_err0:
+ free(slab);
+ return ret;
+}
+
+/*
+ * Delete a buffer from the slab header delayed list and put
+ * it on the slab free list.
+ */
+
+static void
+wsbmSlabFreeBufferLocked(struct _WsbmSlabBuffer *buf)
+{
+ struct _WsbmSlab *slab = buf->parent;
+ struct _WsbmSlabSizeHeader *header = slab->header;
+ struct _WsbmListHead *list = &buf->head;
+
+ WSBMLISTDEL(list);
+ WSBMLISTADDTAIL(list, &slab->freeBuffers);
+ slab->numFree++;
+
+ if (slab->head.next == &slab->head)
+ WSBMLISTADDTAIL(&slab->head, &header->slabs);
+
+ if (slab->numFree == slab->numBuffers) {
+ list = &slab->head;
+ WSBMLISTDEL(list);
+ WSBMLISTADDTAIL(list, &header->freeSlabs);
+ }
+
+ if (header->slabs.next == &header->slabs ||
+ slab->numFree != slab->numBuffers) {
+
+ struct _WsbmListHead *next;
+ struct _WsbmSlabCache *cache = header->slabPool->cache;
+
+ WSBMLISTFOREACHSAFE(list, next, &header->freeSlabs) {
+ int i;
+ struct _WsbmSlabBuffer *sBuf;
+
+ slab = WSBMLISTENTRY(list, struct _WsbmSlab, head);
+ WSBMLISTDELINIT(list);
+
+ sBuf = slab->buffers;
+ for (i=0; i < slab->numBuffers; ++i) {
+ if (sBuf->parent == slab) {
+ WSBM_COND_FREE(&sBuf->event);
+ wsbmBufStorageTakedown(&sBuf->storage);
+ }
+ sBuf++;
+ }
+ wsbmSetKernelBOFree(cache, slab->kbo);
+ free(slab->buffers);
+ free(slab);
+ }
+ }
+}
+
+static void
+wsbmSlabCheckFreeLocked(struct _WsbmSlabSizeHeader *header, int wait)
+{
+ struct _WsbmListHead *list, *prev, *first;
+ struct _WsbmSlabBuffer *sBuf;
+ struct _WsbmSlab *slab;
+ int firstWasSignaled = 1;
+ int signaled;
+ int i;
+ int ret;
+
+ /*
+ * Rerun the freeing test if the youngest tested buffer
+ * was signaled, since there might be more idle buffers
+ * in the delay list.
+ */
+
+ while (firstWasSignaled) {
+ firstWasSignaled = 0;
+ signaled = 0;
+ first = header->delayedBuffers.next;
+
+ /* Only examine the oldest 1/3 of delayed buffers:
+ */
+ if (header->numDelayed > 3) {
+ for (i = 0; i < header->numDelayed; i += 3) {
+ first = first->next;
+ }
+ }
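+
+ /*
+ * Example: with numDelayed == 9 the loop above advances "first"
+ * three links from the oldest end, so the backward scan below
+ * only visits roughly the oldest third of the delayed list.
+ */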
+
+ /*
+ * No need to take the buffer mutex for each buffer we loop
+ * through since we're currently the only user.
+ */
+
+
+ WSBMLISTFOREACHPREVSAFE(list, prev, first->next) {
+
+ if (list == &header->delayedBuffers)
+ break;
+
+ sBuf = WSBMLISTENTRY(list, struct _WsbmSlabBuffer, head);
+ slab = sBuf->parent;
+
+ if (!signaled) {
+ if (wait) {
+ ret = wsbmFenceFinish(sBuf->fence, sBuf->fenceType, 0);
+ if (ret)
+ break;
+ signaled = 1;
+ wait = 0;
+ } else {
+ signaled = wsbmFenceSignaled(sBuf->fence, sBuf->fenceType);
+#ifdef DEBUG_FENCESIGNALED
+ fencesignaled++;
+#endif
+ }
+ if (signaled) {
+ if (list == first)
+ firstWasSignaled = 1;
+ wsbmFenceUnReference(&sBuf->fence);
+ header->numDelayed--;
+ wsbmSlabFreeBufferLocked(sBuf);
+ } else
+ break;
+ } else if (wsbmFenceSignaledCached(sBuf->fence, sBuf->fenceType)) {
+ wsbmFenceUnReference(&sBuf->fence);
+ header->numDelayed--;
+ wsbmSlabFreeBufferLocked(sBuf);
+ }
+ }
+ }
+}
+
+
+static struct _WsbmSlabBuffer *
+wsbmSlabAllocBuffer(struct _WsbmSlabSizeHeader *header)
+{
+ struct _WsbmSlabBuffer *buf;
+ struct _WsbmSlab *slab;
+ struct _WsbmListHead *list;
+ int count = WSBM_SLABPOOL_ALLOC_RETRIES;
+
+ WSBM_MUTEX_LOCK(&header->mutex);
+ while(header->slabs.next == &header->slabs && count > 0) {
+ wsbmSlabCheckFreeLocked(header, 0);
+ if (header->slabs.next != &header->slabs)
+ break;
+
+ WSBM_MUTEX_UNLOCK(&header->mutex);
+ if (count != WSBM_SLABPOOL_ALLOC_RETRIES)
+ usleep(1000);
+ WSBM_MUTEX_LOCK(&header->mutex);
+ (void) wsbmAllocSlab(header);
+ count--;
+ }
+
+ list = header->slabs.next;
+ if (list == &header->slabs) {
+ WSBM_MUTEX_UNLOCK(&header->mutex);
+ return NULL;
+ }
+ slab = WSBMLISTENTRY(list, struct _WsbmSlab, head);
+ if (--slab->numFree == 0)
+ WSBMLISTDELINIT(list);
+
+ list = slab->freeBuffers.next;
+ WSBMLISTDELINIT(list);
+
+ WSBM_MUTEX_UNLOCK(&header->mutex);
+ buf = WSBMLISTENTRY(list, struct _WsbmSlabBuffer, head);
+
+#ifdef DEBUG_FENCESIGNALED
+ createbuffer++;
+ if ((createbuffer % 1000) == 0)
+ _mesa_printf("Createbuffer %d fencesignaled %d\n",
+ createbuffer, fencesignaled);
+#endif
+ return buf;
+}
+
+static struct _WsbmBufStorage *
+pool_create(struct _WsbmBufferPool *pool, unsigned long size,
+ uint32_t placement, unsigned alignment)
+{
+ struct _WsbmSlabPool *slabPool = slabPoolFromPool(pool);
+ struct _WsbmSlabSizeHeader *header;
+ struct _WsbmSlabBuffer *sBuf;
+ int i;
+ int ret;
+
+ /*
+ * FIXME: Check for compatibility.
+ */
+
+ header = slabPool->headers;
+ for (i=0; i<slabPool->numBuckets; ++i) {
+ if (header->bufSize >= size)
+ break;
+ header++;
+ }
+
+ if (i < slabPool->numBuckets) {
+ sBuf = wsbmSlabAllocBuffer(header);
+ return ((sBuf) ? &sBuf->storage : NULL);
+ }
+
+
+ /*
+ * Fall back to allocating a buffer object directly from DRM
+ * and wrapping it in a wsbmBO structure.
+ */
+
+ sBuf = calloc(1, sizeof(*sBuf));
+
+ if (!sBuf)
+ return NULL;
+
+ if (alignment) {
+ if ((alignment < slabPool->pageSize) && (slabPool->pageSize % alignment))
+ goto out_err0;
+ if ((alignment > slabPool->pageSize) && (alignment % slabPool->pageSize))
+ goto out_err0;
+ }
+
+ ret = wsbmBufStorageInit(&sBuf->storage, &header->slabPool->pool);
+ if (ret)
+ goto out_err0;
+
+ ret = WSBM_COND_INIT(&sBuf->event);
+ if (ret)
+ goto out_err1;
+
+ {
+ union ttm_pl_create_arg arg;
+
+ arg.req.size = size;
+ arg.req.placement = placement;
+ arg.req.page_alignment = alignment / slabPool->pageSize;
+
+ DRMRESTARTCOMMANDWRITEREAD(pool->fd,
+ slabPool->devOffset + TTM_PL_CREATE,
+ arg, ret);
+
+ if (ret)
+ goto out_err2;
+
+ sBuf->kBuf.gpuOffset = arg.rep.gpu_offset;
+ sBuf->kBuf.placement = arg.rep.placement;
+ sBuf->kBuf.handle = arg.rep.handle;
+ sBuf->mapHandle = arg.rep.map_handle;
+ sBuf->requestedSize = size;
+
+ sBuf->virtual = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ pool->fd, sBuf->mapHandle);
+
+ if (sBuf->virtual == MAP_FAILED)
+ goto out_err3;
+
+ }
+
+ wsbmAtomicSet(&sBuf->writers, 0);
+ return &sBuf->storage;
+ out_err3:
+ {
+ struct ttm_pl_reference_req arg;
+
+ arg.handle = sBuf->kBuf.handle;
+ (void) drmCommandWriteRead(pool->fd,
+ slabPool->devOffset + TTM_PL_UNREF,
+ &arg, sizeof(arg));
+ }
+ out_err2:
+ WSBM_COND_FREE(&sBuf->event);
+ out_err1:
+ wsbmBufStorageTakedown(&sBuf->storage);
+ out_err0:
+ free(sBuf);
+ return NULL;
+}
+
+static void
+pool_destroy(struct _WsbmBufStorage **p_buf)
+{
+ struct _WsbmBufStorage *buf = *p_buf;
+ struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
+ struct _WsbmSlab *slab;
+ struct _WsbmSlabSizeHeader *header;
+
+ *p_buf = NULL;
+
+ if (!sBuf->isSlabBuffer) {
+ struct _WsbmSlabPool *slabPool = slabPoolFromBuf(sBuf);
+ struct ttm_pl_reference_req arg;
+
+ if (sBuf->virtual != NULL) {
+ (void) munmap(sBuf->virtual, sBuf->requestedSize);
+ sBuf->virtual = NULL;
+ }
+
+ arg.handle = sBuf->kBuf.handle;
+ (void) drmCommandWrite(slabPool->pool.fd,
+ slabPool->devOffset + TTM_PL_UNREF,
+ &arg, sizeof(arg));
+
+ WSBM_COND_FREE(&sBuf->event);
+ wsbmBufStorageTakedown(&sBuf->storage);
+ free(sBuf);
+ return;
+ }
+
+ slab = sBuf->parent;
+ header = slab->header;
+
+ /*
+ * No need to take the buffer mutex below since we're the only user.
+ */
+
+ WSBM_MUTEX_LOCK(&header->mutex);
+ sBuf->unFenced = 0;
+ wsbmAtomicSet(&sBuf->writers, 0);
+ wsbmAtomicSet(&sBuf->storage.refCount, 1);
+
+ if (sBuf->fence && !wsbmFenceSignaledCached(sBuf->fence, sBuf->fenceType)) {
+ WSBMLISTADDTAIL(&sBuf->head, &header->delayedBuffers);
+ header->numDelayed++;
+ } else {
+ if (sBuf->fence)
+ wsbmFenceUnReference(&sBuf->fence);
+ wsbmSlabFreeBufferLocked(sBuf);
+ }
+
+ WSBM_MUTEX_UNLOCK(&header->mutex);
+}
+
+
+static void
+waitIdleLocked(struct _WsbmSlabBuffer *sBuf, int lazy)
+{
+ struct _WsbmBufStorage *storage = &sBuf->storage;
+
+ while(sBuf->unFenced || sBuf->fence != NULL) {
+
+ if (sBuf->unFenced)
+ WSBM_COND_WAIT(&sBuf->event, &storage->mutex);
+
+ if (sBuf->fence != NULL) {
+ if (!wsbmFenceSignaled(sBuf->fence, sBuf->fenceType)) {
+ struct _WsbmFenceObject *fence =
+ wsbmFenceReference(sBuf->fence);
+
+ WSBM_MUTEX_UNLOCK(&storage->mutex);
+ (void) wsbmFenceFinish(fence, sBuf->fenceType, lazy);
+ WSBM_MUTEX_LOCK(&storage->mutex);
+ if (sBuf->fence == fence)
+ wsbmFenceUnReference(&sBuf->fence);
+
+ wsbmFenceUnReference(&fence);
+ } else {
+ wsbmFenceUnReference(&sBuf->fence);
+ }
+ }
+ }
+}
+
+static int
+pool_waitIdle(struct _WsbmBufStorage *buf, int lazy)
+{
+ struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
+
+ WSBM_MUTEX_LOCK(&buf->mutex);
+ waitIdleLocked(sBuf, lazy);
+ WSBM_MUTEX_UNLOCK(&buf->mutex);
+
+ return 0;
+}
+
+static int
+pool_map(struct _WsbmBufStorage *buf, unsigned mode, void **virtual)
+{
+ struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
+
+ *virtual = sBuf->virtual;
+
+ return 0;
+}
+
+static void
+pool_releaseFromCpu(struct _WsbmBufStorage *buf, unsigned mode)
+{
+ struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
+
+ if (wsbmAtomicDecZero(&sBuf->writers))
+ WSBM_COND_BROADCAST(&sBuf->event);
+}
+
+static int
+pool_syncForCpu(struct _WsbmBufStorage *buf, unsigned mode)
+{
+ struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
+ int ret = 0;
+
+ WSBM_MUTEX_LOCK(&buf->mutex);
+ if ((mode & WSBM_SYNCCPU_DONT_BLOCK)) {
+ int signaled;
+
+ if (sBuf->unFenced) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ if (sBuf->isSlabBuffer)
+ signaled = (sBuf->fence == NULL) ||
+ wsbmFenceSignaledCached(sBuf->fence, sBuf->fenceType);
+ else
+ signaled = (sBuf->fence == NULL) ||
+ wsbmFenceSignaled(sBuf->fence, sBuf->fenceType);
+
+ ret = 0;
+ if (signaled) {
+ wsbmFenceUnReference(&sBuf->fence);
+ wsbmAtomicInc(&sBuf->writers);
+ } else
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ waitIdleLocked(sBuf, 0);
+ wsbmAtomicInc(&sBuf->writers);
+ out_unlock:
+ WSBM_MUTEX_UNLOCK(&buf->mutex);
+ return ret;
+}
+
+static void
+pool_unmap(struct _WsbmBufStorage *buf)
+{
+ ;
+}
+
+static unsigned long
+pool_poolOffset(struct _WsbmBufStorage *buf)
+{
+ struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
+
+ return sBuf->start;
+}
+
+static unsigned long
+pool_size(struct _WsbmBufStorage *buf)
+{
+ struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
+ if (!sBuf->isSlabBuffer)
+ return sBuf->requestedSize;
+
+ return sBuf->parent->header->bufSize;
+}
+
+static struct _WsbmKernelBuf *
+pool_kernel(struct _WsbmBufStorage *buf)
+{
+ struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
+
+ return (sBuf->isSlabBuffer) ? &sBuf->parent->kbo->kBuf : &sBuf->kBuf;
+}
+
+
+static unsigned long
+pool_offset(struct _WsbmBufStorage *buf)
+{
+ return pool_kernel(buf)->gpuOffset + pool_poolOffset(buf);
+}
+
+
+static void
+pool_fence(struct _WsbmBufStorage *buf, struct _WsbmFenceObject *fence)
+{
+ struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
+ struct _WsbmKernelBuf *kBuf;
+
+ WSBM_MUTEX_LOCK(&buf->mutex);
+ if (sBuf->fence)
+ wsbmFenceUnReference(&sBuf->fence);
+
+ sBuf->fence = wsbmFenceReference(fence);
+ kBuf = pool_kernel(buf);
+ sBuf->fenceType = kBuf->fence_type_mask;
+ sBuf->unFenced = 0;
+ WSBM_COND_BROADCAST(&sBuf->event);
+ WSBM_MUTEX_UNLOCK(&buf->mutex);
+}
+
+static int
+pool_validate(struct _WsbmBufStorage *buf,
+ uint64_t set_flags,
+ uint64_t clr_flags)
+{
+ struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
+
+ WSBM_MUTEX_LOCK(&buf->mutex);
+ while(wsbmAtomicRead(&sBuf->writers) != 0) {
+ WSBM_COND_WAIT(&sBuf->event, &buf->mutex);
+ }
+
+ sBuf->unFenced = 1;
+ WSBM_MUTEX_UNLOCK(&buf->mutex);
+ return 0;
+}
+
+static void
+pool_unvalidate(struct _WsbmBufStorage *buf)
+{
+ struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
+
+ WSBM_MUTEX_LOCK(&buf->mutex);
+ if (sBuf->unFenced) {
+ sBuf->unFenced = 0;
+ WSBM_COND_BROADCAST(&sBuf->event);
+ }
+ WSBM_MUTEX_UNLOCK(&buf->mutex);
+}
+
+struct _WsbmSlabCache *
+wsbmSlabCacheInit(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec)
+{
+ struct _WsbmSlabCache *tmp;
+
+ tmp = calloc(1, sizeof(*tmp));
+ if (!tmp)
+ return NULL;
+
+ WSBM_MUTEX_INIT(&tmp->mutex);
+ WSBM_MUTEX_LOCK(&tmp->mutex);
+ tmp->slabTimeout.tv_usec = slabTimeoutMsec*1000;
+ tmp->slabTimeout.tv_sec = tmp->slabTimeout.tv_usec / 1000000;
+ tmp->slabTimeout.tv_usec -= tmp->slabTimeout.tv_sec*1000000;
+
+ tmp->checkInterval.tv_usec = checkIntervalMsec*1000;
+ tmp->checkInterval.tv_sec = tmp->checkInterval.tv_usec / 1000000;
+ tmp->checkInterval.tv_usec -= tmp->checkInterval.tv_sec*1000000;
+
+ gettimeofday(&tmp->nextCheck, NULL);
+ wsbmTimeAdd(&tmp->nextCheck, &tmp->checkInterval);
+ WSBMINITLISTHEAD(&tmp->timeoutList);
+ WSBMINITLISTHEAD(&tmp->unCached);
+ WSBMINITLISTHEAD(&tmp->cached);
+ WSBM_MUTEX_UNLOCK(&tmp->mutex);
+
+ return tmp;
+}
+
+void
+wsbmSlabCacheFinish(struct _WsbmSlabCache *cache)
+{
+ struct timeval time;
+
+ time = cache->nextCheck;
+ WSBM_MUTEX_LOCK(&cache->mutex);
+ wsbmTimeAdd(&time, &cache->checkInterval);
+ wsbmFreeTimeoutKBOsLocked(cache, &time);
+ WSBM_MUTEX_UNLOCK(&cache->mutex);
+
+ assert(cache->timeoutList.next == &cache->timeoutList);
+ assert(cache->unCached.next == &cache->unCached);
+ assert(cache->cached.next == &cache->cached);
+
+ WSBM_MUTEX_FREE(&cache->mutex);
+ free(cache);
+}
+
+static void
+wsbmInitSizeHeader(struct _WsbmSlabPool *slabPool, uint32_t size,
+ struct _WsbmSlabSizeHeader *header)
+{
+ WSBM_MUTEX_INIT(&header->mutex);
+ WSBM_MUTEX_LOCK(&header->mutex);
+
+ WSBMINITLISTHEAD(&header->slabs);
+ WSBMINITLISTHEAD(&header->freeSlabs);
+ WSBMINITLISTHEAD(&header->delayedBuffers);
+
+ header->numDelayed = 0;
+ header->slabPool = slabPool;
+ header->bufSize = size;
+
+ WSBM_MUTEX_UNLOCK(&header->mutex);
+}
+
+static void
+wsbmFinishSizeHeader(struct _WsbmSlabSizeHeader *header)
+{
+ struct _WsbmListHead *list, *next;
+ struct _WsbmSlabBuffer *sBuf;
+
+ WSBM_MUTEX_LOCK(&header->mutex);
+ WSBMLISTFOREACHSAFE(list, next, &header->delayedBuffers) {
+ sBuf = WSBMLISTENTRY(list, struct _WsbmSlabBuffer, head);
+ if (sBuf->fence) {
+ (void) wsbmFenceFinish(sBuf->fence, sBuf->fenceType, 0);
+ wsbmFenceUnReference(&sBuf->fence);
+ }
+ header->numDelayed--;
+ wsbmSlabFreeBufferLocked(sBuf);
+ }
+ WSBM_MUTEX_UNLOCK(&header->mutex);
+ WSBM_MUTEX_FREE(&header->mutex);
+}
+
+
+static void
+pool_takedown(struct _WsbmBufferPool *pool)
+{
+ struct _WsbmSlabPool *slabPool = slabPoolFromPool(pool);
+ int i;
+
+ for (i=0; i<slabPool->numBuckets; ++i) {
+ wsbmFinishSizeHeader(&slabPool->headers[i]);
+ }
+
+ free(slabPool->headers);
+ free(slabPool->bucketSizes);
+ free(slabPool);
+}
+
+struct _WsbmBufferPool *
+wsbmSlabPoolInit(int fd,
+ uint32_t devOffset,
+ uint32_t placement,
+ uint32_t validMask,
+ uint32_t smallestSize,
+ uint32_t numSizes,
+ uint32_t desiredNumBuffers,
+ uint32_t maxSlabSize,
+ uint32_t pageAlignment,
+ struct _WsbmSlabCache *cache)
+{
+ struct _WsbmBufferPool *pool;
+ struct _WsbmSlabPool *slabPool;
+ uint32_t i;
+
+ slabPool = calloc(1, sizeof(*slabPool));
+ if (!slabPool)
+ return NULL;
+
+ pool = &slabPool->pool;
+
+ slabPool->bucketSizes = calloc(numSizes, sizeof(*slabPool->bucketSizes));
+ if (!slabPool->bucketSizes)
+ goto out_err0;
+
+ slabPool->headers = calloc(numSizes, sizeof(*slabPool->headers));
+ if (!slabPool->headers)
+ goto out_err1;
+
+ slabPool->devOffset = devOffset;
+ slabPool->cache = cache;
+ slabPool->proposedPlacement = placement;
+ slabPool->validMask = validMask;
+ slabPool->numBuckets = numSizes;
+ slabPool->pageSize = getpagesize();
+ slabPool->pageAlignment = pageAlignment;
+ slabPool->maxSlabSize = maxSlabSize;
+ slabPool->desiredNumBuffers = desiredNumBuffers;
+
+ for (i=0; i<slabPool->numBuckets; ++i) {
+ slabPool->bucketSizes[i] = (smallestSize << i);
+ wsbmInitSizeHeader(slabPool, slabPool->bucketSizes[i],
+ &slabPool->headers[i]);
+ }
+
+ pool->fd = fd;
+ pool->map = &pool_map;
+ pool->unmap = &pool_unmap;
+ pool->destroy = &pool_destroy;
+ pool->offset = &pool_offset;
+ pool->poolOffset = &pool_poolOffset;
+ pool->size = &pool_size;
+ pool->create = &pool_create;
+ pool->fence = &pool_fence;
+ pool->kernel = &pool_kernel;
+ pool->validate = &pool_validate;
+ pool->unvalidate = &pool_unvalidate;
+ pool->waitIdle = &pool_waitIdle;
+ pool->takeDown = &pool_takedown;
+ pool->releasefromcpu = &pool_releaseFromCpu;
+ pool->syncforcpu = &pool_syncForCpu;
+
+ return pool;
+
+ out_err1:
+ free(slabPool->bucketSizes);
+ out_err0:
+ free(slabPool);
+
+ return NULL;
+}
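+
+/*
+ * Worked example of the bucket setup above (the numbers are assumed,
+ * not prescribed): with smallestSize == 4096 and numSizes == 4 the
+ * bucket sizes become 4096 << i, i.e. 4 KiB, 8 KiB, 16 KiB and 32 KiB.
+ * pool_create() picks the first bucket whose bufSize covers the
+ * request and falls back to a plain kernel BO when the request
+ * exceeds the largest bucket.
+ */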
diff --git a/src/wsbm_ttmpool.c b/src/wsbm_ttmpool.c
new file mode 100644
index 0000000..c8dd482
--- /dev/null
+++ b/src/wsbm_ttmpool.c
@@ -0,0 +1,511 @@
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, Tx., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <xf86drm.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include "wsbm_pool.h"
+#include "assert.h"
+#include "wsbm_priv.h"
+#include "wsbm_manager.h"
+#include "ttm/ttm_placement_user.h"
+
+#define DRMRESTARTCOMMANDWRITE(_fd, _val, _arg, _ret) \
+ do { \
+ (_ret) = drmCommandWrite(_fd, _val, &(_arg), sizeof(_arg)); \
+ } while ((_ret) == -EAGAIN || (_ret) == -ERESTART);
+
+#define DRMRESTARTCOMMANDWRITEREAD(_fd, _val, _arg, _ret) \
+ do { \
+ (_ret) = drmCommandWriteRead(_fd, _val, &(_arg), sizeof(_arg)); \
+ } while ((_ret) == -EAGAIN || (_ret) == -ERESTART);
+
+/*
+ * Buffer pool implementation using DRM buffer objects as wsbm buffer objects.
+ */
+
+struct _TTMBuffer
+{
+ struct _WsbmBufStorage buf;
+ struct _WsbmCond event;
+
+ /*
+ * Remains constant after creation.
+ */
+
+ uint64_t requestedSize;
+ uint64_t mapHandle;
+ uint64_t realSize;
+
+ /*
+ * Protected by the kernel lock.
+ */
+
+ struct _WsbmKernelBuf kBuf;
+
+ /*
+ * Protected by the mutex.
+ */
+
+ void *virtual;
+ int syncInProgress;
+ unsigned readers;
+ unsigned writers;
+};
+
+struct _TTMPool
+{
+ struct _WsbmBufferPool pool;
+ unsigned int pageSize;
+ unsigned int devOffset;
+};
+
+static inline struct _TTMPool *
+ttmGetPool(struct _TTMBuffer *dBuf)
+{
+ return containerOf(dBuf->buf.pool, struct _TTMPool, pool);
+}
+
+static inline struct _TTMBuffer *
+ttmBuffer(struct _WsbmBufStorage *buf)
+{
+ return containerOf(buf, struct _TTMBuffer, buf);
+}
+
+static struct _WsbmBufStorage *
+pool_create(struct _WsbmBufferPool *pool,
+ unsigned long size, uint32_t placement, unsigned alignment)
+{
+ struct _TTMBuffer *dBuf = (struct _TTMBuffer *)
+ calloc(1, sizeof(*dBuf));
+ struct _TTMPool *ttmPool = containerOf(pool, struct _TTMPool, pool);
+ int ret;
+ unsigned pageSize = ttmPool->pageSize;
+ union ttm_pl_create_arg arg;
+
+ if (!dBuf)
+ return NULL;
+
+ if ((alignment > pageSize) && (alignment % pageSize))
+ goto out_err0;
+
+ ret = wsbmBufStorageInit(&dBuf->buf, pool);
+ if (ret)
+ goto out_err0;
+
+ ret = WSBM_COND_INIT(&dBuf->event);
+ if (ret)
+ goto out_err1;
+
+ arg.req.size = size;
+ arg.req.placement = placement;
+ arg.req.page_alignment = alignment / pageSize;
+
+ DRMRESTARTCOMMANDWRITEREAD(pool->fd, ttmPool->devOffset + TTM_PL_CREATE,
+ arg, ret);
+
+ if (ret)
+ goto out_err2;
+
+ dBuf->requestedSize = size;
+ dBuf->kBuf.gpuOffset = arg.rep.gpu_offset;
+ dBuf->mapHandle = arg.rep.map_handle;
+ dBuf->realSize = arg.rep.bo_size;
+ dBuf->kBuf.placement = arg.rep.placement;
+ dBuf->kBuf.handle = arg.rep.handle;
+
+ return &dBuf->buf;
+
+ out_err2:
+ WSBM_COND_FREE(&dBuf->event);
+ out_err1:
+ wsbmBufStorageTakedown(&dBuf->buf);
+ out_err0:
+ free(dBuf);
+ return NULL;
+}
+
+static struct _WsbmBufStorage *
+pool_reference(struct _WsbmBufferPool *pool, unsigned handle)
+{
+ struct _TTMBuffer *dBuf = (struct _TTMBuffer *)calloc(1, sizeof(*dBuf));
+ struct _TTMPool *ttmPool = containerOf(pool, struct _TTMPool, pool);
+ union ttm_pl_reference_arg arg;
+ int ret;
+
+ if (!dBuf)
+ return NULL;
+
+ ret = wsbmBufStorageInit(&dBuf->buf, pool);
+ if (ret)
+ goto out_err0;
+
+ ret = WSBM_COND_INIT(&dBuf->event);
+ if (ret)
+ goto out_err1;
+
+ arg.req.handle = handle;
+ ret = drmCommandWriteRead(pool->fd, ttmPool->devOffset + TTM_PL_REFERENCE,
+ &arg, sizeof(arg));
+
+ if (ret)
+ goto out_err2;
+
+ dBuf->requestedSize = arg.rep.bo_size;
+ dBuf->kBuf.gpuOffset = arg.rep.gpu_offset;
+ dBuf->mapHandle = arg.rep.map_handle;
+ dBuf->realSize = arg.rep.bo_size;
+ dBuf->kBuf.placement = arg.rep.placement;
+ dBuf->kBuf.handle = arg.rep.handle;
+ dBuf->kBuf.fence_type_mask = arg.rep.sync_object_arg;
+
+ return &dBuf->buf;
+
+ out_err2:
+ WSBM_COND_FREE(&dBuf->event);
+ out_err1:
+ wsbmBufStorageTakedown(&dBuf->buf);
+ out_err0:
+ free(dBuf);
+ return NULL;
+}
+
+static void
+pool_destroy(struct _WsbmBufStorage **buf)
+{
+ struct _TTMBuffer *dBuf = ttmBuffer(*buf);
+ struct _TTMPool *ttmPool = ttmGetPool(dBuf);
+ struct ttm_pl_reference_req arg;
+
+ *buf = NULL;
+ if (dBuf->virtual != NULL) {
+ (void)munmap(dBuf->virtual, dBuf->requestedSize);
+ dBuf->virtual = NULL;
+ }
+ arg.handle = dBuf->kBuf.handle;
+ (void) drmCommandWrite(dBuf->buf.pool->fd,
+ ttmPool->devOffset + TTM_PL_UNREF,
+ &arg, sizeof(arg));
+
+ WSBM_COND_FREE(&dBuf->event);
+ wsbmBufStorageTakedown(&dBuf->buf);
+ free(dBuf);
+}
+
+static int
+syncforcpu_locked(struct _WsbmBufStorage *buf, unsigned mode)
+{
+ uint32_t kmode = 0;
+ struct _TTMBuffer *dBuf = ttmBuffer(buf);
+ struct _TTMPool *ttmPool = ttmGetPool(dBuf);
+ unsigned int readers;
+ unsigned int writers;
+ int ret = 0;
+
+ while (dBuf->syncInProgress)
+ WSBM_COND_WAIT(&dBuf->event, &buf->mutex);
+
+ readers = dBuf->readers;
+ writers = dBuf->writers;
+
+ if ((mode & WSBM_SYNCCPU_READ) && (++dBuf->readers == 1))
+ kmode |= TTM_PL_SYNCCPU_MODE_READ;
+
+ if ((mode & WSBM_SYNCCPU_WRITE) && (++dBuf->writers == 1))
+ kmode |= TTM_PL_SYNCCPU_MODE_WRITE;
+
+ if (kmode) {
+ struct ttm_pl_synccpu_arg arg;
+
+ if (mode & WSBM_SYNCCPU_DONT_BLOCK)
+ kmode |= TTM_PL_SYNCCPU_MODE_NO_BLOCK;
+
+ dBuf->syncInProgress = 1;
+
+
+ /*
+ * This might be a lengthy wait, so
+ * release the mutex.
+ */
+
+ WSBM_MUTEX_UNLOCK(&buf->mutex);
+
+ arg.handle = dBuf->kBuf.handle;
+ arg.access_mode = kmode;
+ arg.op = TTM_PL_SYNCCPU_OP_GRAB;
+
+ DRMRESTARTCOMMANDWRITE(dBuf->buf.pool->fd,
+ ttmPool->devOffset + TTM_PL_SYNCCPU, arg, ret);
+
+ WSBM_MUTEX_LOCK(&buf->mutex);
+ dBuf->syncInProgress = 0;
+ WSBM_COND_BROADCAST(&dBuf->event);
+
+ if (ret) {
+ dBuf->readers = readers;
+ dBuf->writers = writers;
+ }
+ }
+
+ return ret;
+}
+
+static int
+releasefromcpu_locked(struct _WsbmBufStorage *buf, unsigned mode)
+{
+ uint32_t kmode = 0;
+ struct _TTMBuffer *dBuf = ttmBuffer(buf);
+ struct _TTMPool *ttmPool = ttmGetPool(dBuf);
+ int ret = 0;
+
+ while (dBuf->syncInProgress)
+ WSBM_COND_WAIT(&dBuf->event, &buf->mutex);
+
+ if ((mode & WSBM_SYNCCPU_READ) && (--dBuf->readers == 0))
+ kmode |= TTM_PL_SYNCCPU_MODE_READ;
+
+ if ((mode & WSBM_SYNCCPU_WRITE) && (--dBuf->writers == 0))
+ kmode |= TTM_PL_SYNCCPU_MODE_WRITE;
+
+ if (kmode) {
+ struct ttm_pl_synccpu_arg arg;
+
+ arg.handle = dBuf->kBuf.handle;
+ arg.access_mode = kmode;
+ arg.op = TTM_PL_SYNCCPU_OP_RELEASE;
+
+ DRMRESTARTCOMMANDWRITE(dBuf->buf.pool->fd,
+ ttmPool->devOffset + TTM_PL_SYNCCPU, arg, ret);
+
+ }
+
+ return ret;
+}
+
+static int
+pool_syncforcpu(struct _WsbmBufStorage *buf, unsigned mode)
+{
+ int ret;
+
+ WSBM_MUTEX_LOCK(&buf->mutex);
+ ret = syncforcpu_locked(buf, mode);
+ WSBM_MUTEX_UNLOCK(&buf->mutex);
+ return ret;
+}
+
+static void
+pool_releasefromcpu(struct _WsbmBufStorage *buf, unsigned mode)
+{
+ WSBM_MUTEX_LOCK(&buf->mutex);
+ (void) releasefromcpu_locked(buf, mode);
+ WSBM_MUTEX_UNLOCK(&buf->mutex);
+}
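+
+/*
+ * The two entry points above bracket CPU access; a sketch (the mode
+ * flags are the caller's choice, but the same mask must be passed to
+ * both calls so the reader/writer counts balance):
+ *
+ *   if (pool->syncforcpu(buf, WSBM_SYNCCPU_READ | WSBM_SYNCCPU_WRITE) == 0) {
+ *       void *ptr;
+ *       (void) pool->map(buf, WSBM_SYNCCPU_WRITE, &ptr);
+ *       memset(ptr, 0, pool->size(buf));
+ *       pool->unmap(buf);
+ *       pool->releasefromcpu(buf, WSBM_SYNCCPU_READ | WSBM_SYNCCPU_WRITE);
+ *   }
+ */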
+
+
+static int
+pool_map(struct _WsbmBufStorage *buf, unsigned mode, void **virtual)
+{
+ struct _TTMBuffer *dBuf = ttmBuffer(buf);
+ void *virt;
+ int ret = 0;
+
+
+ WSBM_MUTEX_LOCK(&buf->mutex);
+
+ /*
+ * mmaps are expensive, so we only really unmap if
+ * we destroy the buffer.
+ */
+
+ if (dBuf->virtual == NULL) {
+ virt = mmap(0, dBuf->requestedSize,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ buf->pool->fd, dBuf->mapHandle);
+ if (virt == MAP_FAILED) {
+ ret = -errno;
+ goto out_unlock;
+ }
+ dBuf->virtual = virt;
+ }
+
+ *virtual = dBuf->virtual;
+ out_unlock:
+
+ WSBM_MUTEX_UNLOCK(&buf->mutex);
+
+ return ret;
+}
+
+static void
+pool_unmap(struct _WsbmBufStorage *buf)
+{
+ ;
+}
+
+static unsigned long
+pool_offset(struct _WsbmBufStorage *buf)
+{
+ struct _TTMBuffer *dBuf = ttmBuffer(buf);
+
+ return dBuf->kBuf.gpuOffset;
+}
+
+static unsigned long
+pool_poolOffset(struct _WsbmBufStorage *buf)
+{
+ return 0;
+}
+
+static uint32_t
+pool_placement(struct _WsbmBufStorage *buf)
+{
+ struct _TTMBuffer *dBuf = ttmBuffer(buf);
+
+ return dBuf->kBuf.placement;
+}
+
+static unsigned long
+pool_size(struct _WsbmBufStorage *buf)
+{
+ struct _TTMBuffer *dBuf = ttmBuffer(buf);
+
+ return dBuf->realSize;
+}
+
+static void
+pool_fence(struct _WsbmBufStorage *buf, struct _WsbmFenceObject *fence)
+{
+ /*
+ * Noop. The kernel handles all fencing.
+ */
+}
+
+static int
+pool_waitIdle(struct _WsbmBufStorage *buf, int lazy)
+{
+ struct _TTMBuffer *dBuf = ttmBuffer(buf);
+ struct _TTMPool *ttmPool = ttmGetPool(dBuf);
+ struct ttm_pl_waitidle_arg req;
+ struct _WsbmBufferPool *pool = buf->pool;
+ int ret;
+
+ req.handle = dBuf->kBuf.handle;
+ req.mode = (lazy) ? TTM_PL_WAITIDLE_MODE_LAZY : 0;
+
+ DRMRESTARTCOMMANDWRITE(pool->fd, ttmPool->devOffset + TTM_PL_WAITIDLE,
+ req, ret);
+
+ return ret;
+}
+
+static void
+pool_takedown(struct _WsbmBufferPool *pool)
+{
+ struct _TTMPool *ttmPool = containerOf(pool, struct _TTMPool, pool);
+
+ free(ttmPool);
+}
+
+static int
+pool_setStatus(struct _WsbmBufStorage *buf, uint32_t set_placement,
+ uint32_t clr_placement)
+{
+ struct _TTMBuffer *dBuf = ttmBuffer(buf);
+ struct _TTMPool *ttmPool = ttmGetPool(dBuf);
+ union ttm_pl_setstatus_arg arg;
+ struct ttm_pl_setstatus_req *req = &arg.req;
+ struct ttm_pl_rep *rep = &arg.rep;
+ struct _WsbmBufferPool *pool = buf->pool;
+ int ret;
+
+ req->handle = dBuf->kBuf.handle;
+ req->set_placement = set_placement;
+ req->clr_placement = clr_placement;
+
+ DRMRESTARTCOMMANDWRITEREAD(pool->fd,
+ ttmPool->devOffset + TTM_PL_SETSTATUS,
+ arg, ret);
+
+ if (!ret) {
+ dBuf->kBuf.gpuOffset = rep->gpu_offset;
+ dBuf->kBuf.placement = rep->placement;
+ }
+
+ return ret;
+}
+
+static struct _WsbmKernelBuf *
+pool_kernel(struct _WsbmBufStorage *buf)
+{
+ return (void *)&ttmBuffer(buf)->kBuf;
+}
+
+struct _WsbmBufferPool *
+wsbmTTMPoolInit(int fd, unsigned int devOffset)
+{
+ struct _TTMPool *ttmPool;
+ struct _WsbmBufferPool *pool;
+
+ ttmPool = (struct _TTMPool *)calloc(1, sizeof(*ttmPool));
+
+ if (!ttmPool)
+ return NULL;
+
+ ttmPool->pageSize = getpagesize();
+ ttmPool->devOffset = devOffset;
+ pool = &ttmPool->pool;
+
+ pool->fd = fd;
+ pool->map = &pool_map;
+ pool->unmap = &pool_unmap;
+ pool->syncforcpu = &pool_syncforcpu;
+ pool->releasefromcpu = &pool_releasefromcpu;
+ pool->destroy = &pool_destroy;
+ pool->offset = &pool_offset;
+ pool->poolOffset = &pool_poolOffset;
+ pool->placement = &pool_placement;
+ pool->size = &pool_size;
+ pool->create = &pool_create;
+ pool->fence = &pool_fence;
+ pool->kernel = &pool_kernel;
+ pool->validate = NULL;
+ pool->unvalidate = NULL;
+ pool->waitIdle = &pool_waitIdle;
+ pool->takeDown = &pool_takedown;
+ pool->createByReference = &pool_reference;
+ pool->setStatus = &pool_setStatus;
+ return pool;
+}
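+
+/*
+ * Usage sketch: the second argument is the driver-specific offset that
+ * is added to the TTM_PL_* command indices in the ioctl calls above,
+ * i.e. it selects where in the driver's command table the placement
+ * ioctls live. "drvPlacementBase" is an assumed name:
+ *
+ *   struct _WsbmBufferPool *pool = wsbmTTMPoolInit(fd, drvPlacementBase);
+ *   struct _WsbmBufStorage *buf =
+ *       pool->create(pool, 65536, TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED, 0);
+ */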
diff --git a/src/wsbm_userpool.c b/src/wsbm_userpool.c
new file mode 100644
index 0000000..e859662
--- /dev/null
+++ b/src/wsbm_userpool.c
@@ -0,0 +1,691 @@
+/**************************************************************************
+ *
+ * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdint.h>
+#include <errno.h>
+#include <unistd.h>
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+#include "wsbm_pool.h"
+#include "wsbm_fencemgr.h"
+#include "wsbm_manager.h"
+#include "wsbm_mm.h"
+#include "wsbm_priv.h"
+
+/*
+ * Malloced memory must be aligned to 16 bytes, since that's what
+ * the DMA bitblt requires.
+ */
+
+#define WSBM_USER_ALIGN_ADD 16
+#define WSBM_USER_ALIGN_SYSMEM(_val) \
+ ((void *)(((unsigned long) (_val) + 15) & ~15))
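+
+/*
+ * Example: a malloc() that returns 0x1004 yields
+ * WSBM_USER_ALIGN_SYSMEM(0x1004) == 0x1010; the WSBM_USER_ALIGN_ADD
+ * slack in each allocation guarantees that the rounded-up pointer
+ * still lies within the block.
+ */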
+
+struct _WsbmUserBuffer
+{
+ struct _WsbmBufStorage buf;
+ struct _WsbmKernelBuf kBuf;
+
+ /* Protected by the pool mutex */
+
+ struct _WsbmListHead lru;
+ struct _WsbmListHead delayed;
+
+ /* Protected by the buffer mutex */
+
+ unsigned long size;
+ unsigned long alignment;
+
+ struct _WsbmCond event;
+ uint32_t proposedPlacement;
+ uint32_t newFenceType;
+
+ void *map;
+ void *sysmem;
+ int unFenced;
+ struct _WsbmFenceObject *fence;
+ struct _WsbmMMNode *node;
+
+ struct _WsbmAtomic writers;
+};
+
+struct _WsbmUserPool
+{
+ /*
+ * Constant after initialization.
+ */
+
+ struct _WsbmBufferPool pool;
+ unsigned long agpOffset;
+ unsigned long agpMap;
+ unsigned long agpSize;
+ unsigned long vramOffset;
+ unsigned long vramMap;
+ unsigned long vramSize;
+ struct _WsbmMutex mutex;
+ struct _WsbmListHead delayed;
+ struct _WsbmListHead vramLRU;
+ struct _WsbmListHead agpLRU;
+ struct _WsbmMM vramMM;
+ struct _WsbmMM agpMM;
+ uint32_t (*fenceTypes) (uint64_t);
+};
+
+static inline struct _WsbmUserPool *
+userPool(struct _WsbmUserBuffer *buf)
+{
+ return containerOf(buf->buf.pool, struct _WsbmUserPool, pool);
+}
+
+static inline struct _WsbmUserBuffer *
+userBuf(struct _WsbmBufStorage *buf)
+{
+ return containerOf(buf, struct _WsbmUserBuffer, buf);
+}
+
+static void
+waitIdleLocked(struct _WsbmBufStorage *buf, int lazy)
+{
+ struct _WsbmUserBuffer *vBuf = userBuf(buf);
+
+ while (vBuf->unFenced || vBuf->fence != NULL) {
+ if (vBuf->unFenced)
+ WSBM_COND_WAIT(&vBuf->event, &buf->mutex);
+
+ if (vBuf->fence != NULL) {
+ if (!wsbmFenceSignaled(vBuf->fence, vBuf->kBuf.fence_type_mask)) {
+ struct _WsbmFenceObject *fence =
+ wsbmFenceReference(vBuf->fence);
+
+ WSBM_MUTEX_UNLOCK(&buf->mutex);
+ (void)wsbmFenceFinish(fence, vBuf->kBuf.fence_type_mask,
+ lazy);
+ WSBM_MUTEX_LOCK(&buf->mutex);
+
+ if (vBuf->fence == fence)
+ wsbmFenceUnreference(&vBuf->fence);
+
+ wsbmFenceUnreference(&fence);
+ } else {
+ wsbmFenceUnreference(&vBuf->fence);
+ }
+ }
+ }
+}
+
+static int
+pool_waitIdle(struct _WsbmBufStorage *buf, int lazy)
+{
+ WSBM_MUTEX_UNLOCK(&buf->mutex);
+ waitIdleLocked(buf, lazy);
+ WSBM_MUTEX_UNLOCK(&buf->mutex);
+
+ return 0;
+}
+
+static int
+evict_lru(struct _WsbmListHead *lru)
+{
+ struct _WsbmUserBuffer *vBuf;
+ struct _WsbmUserPool *p;
+ struct _WsbmListHead *list = lru->next;
+ int err;
+
+ if (list == lru) {
+ return -ENOMEM;
+ }
+
+ vBuf = WSBMLISTENTRY(list, struct _WsbmUserBuffer, lru);
+ p = userPool(vBuf);
+ WSBM_MUTEX_UNLOCK(&p->mutex);
+ WSBM_MUTEX_LOCK(&vBuf->buf.mutex);
+ WSBM_MUTEX_LOCK(&p->mutex);
+
+ vBuf->sysmem = malloc(vBuf->size + WSBM_USER_ALIGN_ADD);
+
+ if (!vBuf->sysmem) {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+
+ if (vBuf->fence) {
+ (void)wsbmFenceFinish(vBuf->fence, vBuf->kBuf.fence_type_mask, 0);
+ wsbmFenceUnreference(&vBuf->fence);
+ }
+
+ memcpy(WSBM_USER_ALIGN_SYSMEM(vBuf->sysmem), vBuf->map, vBuf->size);
+ WSBMLISTDELINIT(&vBuf->lru);
+ vBuf->kBuf.placement = WSBM_PL_FLAG_SYSTEM;
+ vBuf->map = WSBM_USER_ALIGN_SYSMEM(vBuf->sysmem);
+
+ /*
+ * FIXME: Free memory.
+ */
+
+ err = 0;
+ out_unlock:
+ WSBM_MUTEX_UNLOCK(&vBuf->buf.mutex);
+ return err;
+}
+
+static struct _WsbmBufStorage *
+pool_create(struct _WsbmBufferPool *pool,
+ unsigned long size, uint32_t placement, unsigned alignment)
+{
+ struct _WsbmUserPool *p = containerOf(pool, struct _WsbmUserPool, pool);
+ struct _WsbmUserBuffer *vBuf = calloc(1, sizeof(*vBuf));
+
+ if (!vBuf)
+ return NULL;
+
+ wsbmBufStorageInit(&vBuf->buf, pool);
+ vBuf->sysmem = NULL;
+ vBuf->proposedPlacement = placement;
+ vBuf->size = size;
+ vBuf->alignment = alignment;
+
+ WSBMINITLISTHEAD(&vBuf->lru);
+ WSBMINITLISTHEAD(&vBuf->delayed);
+ WSBM_MUTEX_LOCK(&p->mutex);
+
+ if (placement & WSBM_PL_FLAG_TT) {
+ vBuf->node = wsbmMMSearchFree(&p->agpMM, size, alignment, 1);
+ if (vBuf->node)
+ vBuf->node = wsbmMMGetBlock(vBuf->node, size, alignment);
+
+ if (vBuf->node) {
+ vBuf->kBuf.placement = WSBM_PL_FLAG_TT;
+ vBuf->kBuf.gpuOffset = p->agpOffset + vBuf->node->start;
+ vBuf->map = (void *)(p->agpMap + vBuf->node->start);
+ WSBMLISTADDTAIL(&vBuf->lru, &p->agpLRU);
+ goto have_mem;
+ }
+ }
+
+ if (placement & WSBM_PL_FLAG_VRAM) {
+ vBuf->node = wsbmMMSearchFree(&p->vramMM, size, alignment, 1);
+ if (vBuf->node)
+ vBuf->node = wsbmMMGetBlock(vBuf->node, size, alignment);
+
+ if (vBuf->node) {
+ vBuf->kBuf.placement = WSBM_PL_FLAG_VRAM;
+ vBuf->kBuf.gpuOffset = p->vramOffset + vBuf->node->start;
+ vBuf->map = (void *)(p->vramMap + vBuf->node->start);
+ WSBMLISTADDTAIL(&vBuf->lru, &p->vramLRU);
+ goto have_mem;
+ }
+ }
+
+ if ((placement & WSBM_PL_FLAG_NO_EVICT)
+ && !(placement & WSBM_PL_FLAG_SYSTEM)) {
+ WSBM_MUTEX_UNLOCK(&p->mutex);
+ goto out_err;
+ }
+
+ vBuf->sysmem = malloc(size + WSBM_USER_ALIGN_ADD);
+ vBuf->kBuf.placement = WSBM_PL_FLAG_SYSTEM;
+ vBuf->map = WSBM_USER_ALIGN_SYSMEM(vBuf->sysmem);
+
+ have_mem:
+
+ WSBM_MUTEX_UNLOCK(&p->mutex);
+ if (vBuf->sysmem != NULL
+ || (!(vBuf->kBuf.placement & WSBM_PL_FLAG_SYSTEM)))
+ return &vBuf->buf;
+ out_err:
+ free(vBuf);
+ return NULL;
+}
+
+static int
+pool_validate(struct _WsbmBufStorage *buf, uint64_t set_flags,
+ uint64_t clr_flags)
+{
+ struct _WsbmUserBuffer *vBuf = userBuf(buf);
+ struct _WsbmUserPool *p = userPool(vBuf);
+ int err = -ENOMEM;
+
+ WSBM_MUTEX_LOCK(&buf->mutex);
+
+ while(wsbmAtomicRead(&vBuf->writers) != 0)
+ WSBM_COND_WAIT(&vBuf->event, &buf->mutex);
+
+ vBuf->unFenced = 1;
+
+ WSBM_MUTEX_LOCK(&p->mutex);
+ WSBMLISTDELINIT(&vBuf->lru);
+
+ vBuf->proposedPlacement =
+ (vBuf->proposedPlacement | set_flags) & ~clr_flags;
+
+ if ((vBuf->proposedPlacement & vBuf->kBuf.placement & WSBM_PL_MASK_MEM) ==
+ vBuf->kBuf.placement) {
+ err = 0;
+ goto have_mem;
+ }
+
+ /*
+ * We're moving to another memory region, so evict first and we'll
+ * do a sw copy to the other region.
+ */
+
+ if (!(vBuf->kBuf.placement & WSBM_PL_FLAG_SYSTEM)) {
+ struct _WsbmListHead tmpLRU;
+
+ WSBMINITLISTHEAD(&tmpLRU);
+ WSBMLISTADDTAIL(&tmpLRU, &vBuf->lru);
+ err = evict_lru(&tmpLRU);
+ if (err)
+ goto have_mem;
+ }
+
+ if (vBuf->proposedPlacement & WSBM_PL_FLAG_TT) {
+ do {
+ vBuf->node =
+ wsbmMMSearchFree(&p->agpMM, vBuf->size, vBuf->alignment, 1);
+ if (vBuf->node)
+ vBuf->node =
+ wsbmMMGetBlock(vBuf->node, vBuf->size, vBuf->alignment);
+
+ if (vBuf->node) {
+ vBuf->kBuf.placement = WSBM_PL_FLAG_TT;
+ vBuf->kBuf.gpuOffset = p->agpOffset + vBuf->node->start;
+ vBuf->map = (void *)(p->agpMap + vBuf->node->start);
+ memcpy(vBuf->map, WSBM_USER_ALIGN_SYSMEM(vBuf->sysmem),
+ vBuf->size);
+ free(vBuf->sysmem);
+ goto have_mem;
+ }
+ } while (evict_lru(&p->agpLRU) == 0);
+ }
+
+ if (vBuf->proposedPlacement & WSBM_PL_FLAG_VRAM) {
+ do {
+ vBuf->node =
+ wsbmMMSearchFree(&p->vramMM, vBuf->size, vBuf->alignment, 1);
+ if (vBuf->node)
+ vBuf->node =
+ wsbmMMGetBlock(vBuf->node, vBuf->size, vBuf->alignment);
+
+ if (vBuf->node) {
+ vBuf->kBuf.placement = WSBM_PL_FLAG_VRAM;
+ vBuf->kBuf.gpuOffset = p->vramOffset + vBuf->node->start;
+ vBuf->map = (void *)(p->vramMap + vBuf->node->start);
+ memcpy(vBuf->map, WSBM_USER_ALIGN_SYSMEM(vBuf->sysmem),
+ vBuf->size);
+ free(vBuf->sysmem);
+ goto have_mem;
+ }
+ } while (evict_lru(&p->vramLRU) == 0);
+ }
+
+ if (vBuf->proposedPlacement & WSBM_PL_FLAG_SYSTEM)
+ goto have_mem;
+
+ err = -ENOMEM;
+
+ have_mem:
+ vBuf->newFenceType = p->fenceTypes(set_flags);
+ WSBM_MUTEX_UNLOCK(&p->mutex);
+ WSBM_MUTEX_UNLOCK(&buf->mutex);
+ return err;
+}
+
+static int
+pool_setStatus(struct _WsbmBufStorage *buf,
+ uint32_t set_placement, uint32_t clr_placement)
+{
+ struct _WsbmUserBuffer *vBuf = userBuf(buf);
+ int ret;
+
+ ret = pool_validate(buf, set_placement, clr_placement);
+ vBuf->unFenced = 0;
+ return ret;
+}
+
+static void
+release_delayed_buffers(struct _WsbmUserPool *p)
+{
+ struct _WsbmUserBuffer *vBuf;
+ struct _WsbmListHead *list, *next;
+
+ WSBM_MUTEX_LOCK(&p->mutex);
+
+ /*
+ * We don't need to take the buffer mutexes in this loop, since
+ * the only other user is the evict_lru function, which has the
+ * pool mutex held when accessing the buffer fence member.
+ */
+
+ WSBMLISTFOREACHSAFE(list, next, &p->delayed) {
+ vBuf = WSBMLISTENTRY(list, struct _WsbmUserBuffer, delayed);
+
+ if (!vBuf->fence
+ || wsbmFenceSignaled(vBuf->fence, vBuf->kBuf.fence_type_mask)) {
+ if (vBuf->fence)
+ wsbmFenceUnreference(&vBuf->fence);
+
+ WSBMLISTDEL(&vBuf->delayed);
+ WSBMLISTDEL(&vBuf->lru);
+
+ if ((vBuf->kBuf.placement & WSBM_PL_FLAG_SYSTEM) == 0)
+ wsbmMMPutBlock(vBuf->node);
+ else
+ free(vBuf->sysmem);
+
+ free(vBuf);
+ } else
+ break;
+
+ }
+ WSBM_MUTEX_UNLOCK(&p->mutex);
+}
+
+static void
+pool_destroy(struct _WsbmBufStorage **buf)
+{
+ struct _WsbmUserBuffer *vBuf = userBuf(*buf);
+ struct _WsbmUserPool *p = userPool(vBuf);
+
+ *buf = NULL;
+
+ WSBM_MUTEX_LOCK(&vBuf->buf.mutex);
+ if ((vBuf->fence
+ && !wsbmFenceSignaled(vBuf->fence, vBuf->kBuf.fence_type_mask))) {
+ WSBM_MUTEX_LOCK(&p->mutex);
+ WSBMLISTADDTAIL(&vBuf->delayed, &p->delayed);
+ WSBM_MUTEX_UNLOCK(&p->mutex);
+ WSBM_MUTEX_UNLOCK(&vBuf->buf.mutex);
+ return;
+ }
+
+ if (vBuf->fence)
+ wsbmFenceUnreference(&vBuf->fence);
+
+ WSBM_MUTEX_LOCK(&p->mutex);
+ WSBMLISTDEL(&vBuf->lru);
+ WSBM_MUTEX_UNLOCK(&p->mutex);
+
+ if (!(vBuf->kBuf.placement & WSBM_PL_FLAG_SYSTEM))
+ wsbmMMPutBlock(vBuf->node);
+ else
+ free(vBuf->sysmem);
+
+ free(vBuf);
+ return;
+}
+
+static int
+pool_map(struct _WsbmBufStorage *buf, unsigned mode, void **virtual)
+{
+ struct _WsbmUserBuffer *vBuf = userBuf(buf);
+
+ *virtual = vBuf->map;
+ return 0;
+}
+
+static void
+pool_unmap(struct _WsbmBufStorage *buf)
+{
+ ;
+}
+
+static void
+pool_releaseFromCpu(struct _WsbmBufStorage *buf, unsigned mode)
+{
+ struct _WsbmUserBuffer *vBuf = userBuf(buf);
+
+ if (wsbmAtomicDecZero(&vBuf->writers))
+ WSBM_COND_BROADCAST(&vBuf->event);
+
+}
+
+static int
+pool_syncForCpu(struct _WsbmBufStorage *buf, unsigned mode)
+{
+ struct _WsbmUserBuffer *vBuf = userBuf(buf);
+ int ret = 0;
+
+ WSBM_MUTEX_LOCK(&buf->mutex);
+ if ((mode & WSBM_SYNCCPU_DONT_BLOCK)) {
+
+ if (vBuf->unFenced) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ ret = 0;
+ if ((vBuf->fence == NULL) ||
+ wsbmFenceSignaled(vBuf->fence, vBuf->kBuf.fence_type_mask)) {
+ wsbmFenceUnreference(&vBuf->fence);
+ wsbmAtomicInc(&vBuf->writers);
+ } else
+ ret = -EBUSY;
+
+ goto out_unlock;
+ }
+ waitIdleLocked(buf, 0);
+ wsbmAtomicInc(&vBuf->writers);
+ out_unlock:
+ WSBM_MUTEX_UNLOCK(&buf->mutex);
+ return ret;
+}
+
+
+static unsigned long
+pool_offset(struct _WsbmBufStorage *buf)
+{
+ return userBuf(buf)->kBuf.gpuOffset;
+}
+
+static unsigned long
+pool_poolOffset(struct _WsbmBufStorage *buf)
+{
+ return 0UL;
+}
+
+static unsigned long
+pool_size(struct _WsbmBufStorage *buf)
+{
+ return userBuf(buf)->size;
+}
+
+static void
+pool_fence(struct _WsbmBufStorage *buf, struct _WsbmFenceObject *fence)
+{
+ struct _WsbmUserBuffer *vBuf = userBuf(buf);
+ struct _WsbmUserPool *p = userPool(vBuf);
+
+ WSBM_MUTEX_LOCK(&buf->mutex);
+
+ if (vBuf->fence)
+ wsbmFenceUnreference(&vBuf->fence);
+
+ vBuf->fence = wsbmFenceReference(fence);
+ vBuf->unFenced = 0;
+ vBuf->kBuf.fence_type_mask = vBuf->newFenceType;
+
+ WSBM_COND_BROADCAST(&vBuf->event);
+ WSBM_MUTEX_LOCK(&p->mutex);
+ if (vBuf->kBuf.placement & WSBM_PL_FLAG_VRAM)
+ WSBMLISTADDTAIL(&vBuf->lru, &p->vramLRU);
+ else if (vBuf->kBuf.placement & WSBM_PL_FLAG_TT)
+ WSBMLISTADDTAIL(&vBuf->lru, &p->agpLRU);
+ WSBM_MUTEX_UNLOCK(&p->mutex);
+ WSBM_MUTEX_UNLOCK(&buf->mutex);
+}
+
+static void
+pool_unvalidate(struct _WsbmBufStorage *buf)
+{
+ struct _WsbmUserBuffer *vBuf = userBuf(buf);
+ struct _WsbmUserPool *p = userPool(vBuf);
+
+ WSBM_MUTEX_LOCK(&buf->mutex);
+
+ if (!vBuf->unFenced)
+ goto out_unlock;
+
+ vBuf->unFenced = 0;
+ WSBM_COND_BROADCAST(&vBuf->event);
+ WSBM_MUTEX_LOCK(&p->mutex);
+ if (vBuf->kBuf.placement & WSBM_PL_FLAG_VRAM)
+ WSBMLISTADDTAIL(&vBuf->lru, &p->vramLRU);
+ else if (vBuf->kBuf.placement & WSBM_PL_FLAG_TT)
+ WSBMLISTADDTAIL(&vBuf->lru, &p->agpLRU);
+ WSBM_MUTEX_UNLOCK(&p->mutex);
+
+ out_unlock:
+
+ WSBM_MUTEX_UNLOCK(&buf->mutex);
+}
+
+static struct _WsbmKernelBuf *
+pool_kernel(struct _WsbmBufStorage *buf)
+{
+ struct _WsbmUserBuffer *vBuf = userBuf(buf);
+
+ return &vBuf->kBuf;
+}
+
+static void
+pool_takedown(struct _WsbmBufferPool *pool)
+{
+ struct _WsbmUserPool *p = containerOf(pool, struct _WsbmUserPool, pool);
+ int empty;
+
+ do {
+ release_delayed_buffers(p);
+ WSBM_MUTEX_LOCK(&p->mutex);
+ empty = (p->delayed.next == &p->delayed);
+ WSBM_MUTEX_UNLOCK(&p->mutex);
+
+ if (!empty)
+ usleep(1000);
+
+ } while (!empty);
+ WSBM_MUTEX_LOCK(&p->mutex);
+
+ while (evict_lru(&p->vramLRU) == 0) ;
+ while (evict_lru(&p->agpLRU) == 0) ;
+
+ WSBM_MUTEX_UNLOCK(&p->mutex);
+
+ wsbmMMtakedown(&p->agpMM);
+ wsbmMMtakedown(&p->vramMM);
+
+ free(p);
+}
+
+void
+wsbmUserPoolClean(struct _WsbmBufferPool *pool,
+ int cleanVram,
+ int cleanAgp)
+{
+ struct _WsbmUserPool *p = containerOf(pool, struct _WsbmUserPool, pool);
+
+ WSBM_MUTEX_LOCK(&p->mutex);
+ if (cleanVram)
+ while (evict_lru(&p->vramLRU) == 0) ;
+ if (cleanAgp)
+ while (evict_lru(&p->agpLRU) == 0) ;
+ WSBM_MUTEX_UNLOCK(&p->mutex);
+}
+
+struct _WsbmBufferPool *
+wsbmUserPoolInit(void *vramAddr,
+ unsigned long vramStart, unsigned long vramSize,
+ void *agpAddr, unsigned long agpStart,
+ unsigned long agpSize,
+ uint32_t (*fenceTypes) (uint64_t set_flags))
+{
+ struct _WsbmBufferPool *pool;
+ struct _WsbmUserPool *uPool;
+ int ret;
+
+ uPool = calloc(1, sizeof(*uPool));
+ if (!uPool)
+ goto out_err0;
+
+ ret = WSBM_MUTEX_INIT(&uPool->mutex);
+ if (ret)
+ goto out_err0;
+
+ ret = wsbmMMinit(&uPool->vramMM, 0, vramSize);
+ if (ret)
+ goto out_err1;
+
+ ret = wsbmMMinit(&uPool->agpMM, 0, agpSize);
+ if (ret)
+ goto out_err2;
+
+ WSBMINITLISTHEAD(&uPool->delayed);
+ WSBMINITLISTHEAD(&uPool->vramLRU);
+ WSBMINITLISTHEAD(&uPool->agpLRU);
+
+ uPool->agpOffset = agpStart;
+ uPool->agpMap = (unsigned long)agpAddr;
+ uPool->vramOffset = vramStart;
+ uPool->vramMap = (unsigned long)vramAddr;
+ uPool->fenceTypes = fenceTypes;
+
+ pool = &uPool->pool;
+ pool->map = &pool_map;
+ pool->unmap = &pool_unmap;
+ pool->destroy = &pool_destroy;
+ pool->offset = &pool_offset;
+ pool->poolOffset = &pool_poolOffset;
+ pool->size = &pool_size;
+ pool->create = &pool_create;
+ pool->fence = &pool_fence;
+ pool->unvalidate = &pool_unvalidate;
+ pool->kernel = &pool_kernel;
+ pool->validate = &pool_validate;
+ pool->waitIdle = &pool_waitIdle;
+ pool->takeDown = &pool_takedown;
+ pool->setStatus = &pool_setStatus;
+ pool->syncforcpu = &pool_syncForCpu;
+ pool->releasefromcpu = &pool_releaseFromCpu;
+
+ return pool;
+
+ out_err2:
+ wsbmMMtakedown(&uPool->vramMM);
+ out_err1:
+ WSBM_MUTEX_FREE(&uPool->mutex);
+ out_err0:
+ free(uPool);
+
+ return NULL;
+}
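+
+/*
+ * Illustrative setup sketch; the addresses and sizes are made-up
+ * assumptions. A driver with a 64 MiB VRAM aperture mapped at vramPtr
+ * and a 128 MiB GART region mapped at agpPtr might call:
+ *
+ *   pool = wsbmUserPoolInit(vramPtr, 0, 64 << 20,
+ *                           agpPtr, 0, 128 << 20,
+ *                           myFenceTypes);
+ *
+ * where myFenceTypes() maps validation flags to the fence type mask
+ * the engine will signal.
+ */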
diff --git a/src/wsbm_util.h b/src/wsbm_util.h
new file mode 100644
index 0000000..55361e8
--- /dev/null
+++ b/src/wsbm_util.h
@@ -0,0 +1,76 @@
+/*
+ * This file is not copyrighted for obvious reasons.
+ */
+
+#ifndef _WSBM_UTIL_H_
+#define _WSBM_UTIL_H_
+
+#include <stddef.h>
+
+#ifndef containerOf
+#define containerOf(__item, __type, __field) \
+ ((__type *)(((char *) (__item)) - offsetof(__type, __field)))
+#endif
+
+struct _WsbmListHead
+{
+ struct _WsbmListHead *prev;
+ struct _WsbmListHead *next;
+};
+
+#define WSBMINITLISTHEAD(__item) \
+ do{ \
+ (__item)->prev = (__item); \
+ (__item)->next = (__item); \
+ } while (0)
+
+#define WSBMLISTADD(__item, __list) \
+ do { \
+ (__item)->prev = (__list); \
+ (__item)->next = (__list)->next; \
+ (__list)->next->prev = (__item); \
+ (__list)->next = (__item); \
+ } while (0)
+
+#define WSBMLISTADDTAIL(__item, __list) \
+ do { \
+ (__item)->next = (__list); \
+ (__item)->prev = (__list)->prev; \
+ (__list)->prev->next = (__item); \
+ (__list)->prev = (__item); \
+ } while(0)
+
+#define WSBMLISTDEL(__item) \
+ do { \
+ (__item)->prev->next = (__item)->next; \
+ (__item)->next->prev = (__item)->prev; \
+ } while(0)
+
+#define WSBMLISTDELINIT(__item) \
+ do { \
+ (__item)->prev->next = (__item)->next; \
+ (__item)->next->prev = (__item)->prev; \
+ (__item)->next = (__item); \
+ (__item)->prev = (__item); \
+ } while(0)
+
+#define WSBMLISTFOREACH(__item, __list) \
+ for((__item) = (__list)->next; (__item) != (__list); (__item) = (__item)->next)
+
+#define WSBMLISTFOREACHPREV(__item, __list) \
+ for((__item) = (__list)->prev; (__item) != (__list); (__item) = (__item)->prev)
+
+#define WSBMLISTFOREACHSAFE(__item, __next, __list) \
+ for((__item) = (__list)->next, (__next) = (__item)->next; \
+ (__item) != (__list); \
+ (__item) = (__next), (__next) = (__item)->next)
+
+#define WSBMLISTFOREACHPREVSAFE(__item, __prev, __list) \
+ for((__item) = (__list)->prev, (__prev) = (__item)->prev; \
+ (__item) != (__list); \
+ (__item) = (__prev), (__prev) = (__item)->prev)
+
+#define WSBMLISTENTRY(__item, __type, __field) \
+ containerOf(__item, __type, __field)
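+
+/*
+ * Usage sketch for the list macros; the struct and variable names are
+ * illustrative only:
+ *
+ *   struct item { int value; struct _WsbmListHead head; };
+ *
+ *   struct _WsbmListHead list, *pos;
+ *   struct item a, b;
+ *
+ *   WSBMINITLISTHEAD(&list);
+ *   WSBMLISTADDTAIL(&a.head, &list);
+ *   WSBMLISTADDTAIL(&b.head, &list);
+ *   WSBMLISTFOREACH(pos, &list) {
+ *       struct item *it = WSBMLISTENTRY(pos, struct item, head);
+ *       ...
+ *   }
+ */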
+
+#endif