author    Dave Airlie <airlied@redhat.com>    2018-10-04 10:19:33 +1000
committer Dave Airlie <airlied@redhat.com>    2018-10-04 10:19:33 +1000
commit    d04a836ea76c49db8c5547612d9c9cbb97f0402d (patch)
tree      7874b1968e5abc3d164efb29d8993932cf14f509 /drivers/gpu
parent    87c2ee740c07f1edae9eec8bc45cb9b32a68f323 (diff)
parent    e8c66efbfe3a2e3cbc573f2474a3d51690f1b857 (diff)
Merge branch 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux into drm-next
Mostly code reorganizations and optimizations for vmwgfx.

- Move TTM code that's only used by vmwgfx to vmwgfx
- Break out the vmwgfx buffer- and resource validation code to a separate source file
- Get rid of a number of atomic operations during command buffer validation.

From: Thomas Hellstrom <thellstrom@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180928131157.2810-1-thellstrom@vmware.com
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/ttm/Makefile  4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c  3
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/ttm_lock.c (renamed from drivers/gpu/drm/ttm/ttm_lock.c)  15
-rw-r--r--  drivers/gpu/drm/vmwgfx/ttm_lock.h  248
-rw-r--r--  drivers/gpu/drm/vmwgfx/ttm_object.c (renamed from drivers/gpu/drm/ttm/ttm_object.c)  97
-rw-r--r--  drivers/gpu/drm/vmwgfx/ttm_object.h  375
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c  50
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c  3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c  23
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c  2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c  4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h  147
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  1288
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c  7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c  199
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h  24
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_prime.c  2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c  124
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h  7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c  48
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c  25
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c  7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_so.c  11
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c  31
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c  20
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.c  770
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.h  227
29 files changed, 2545 insertions(+), 1224 deletions(-)
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index a60e560804e0..01fc670ce7a2 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,8 +4,8 @@
ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
- ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
- ttm_bo_manager.o ttm_page_alloc_dma.o
+ ttm_execbuf_util.o ttm_page_alloc.o ttm_bo_manager.o \
+ ttm_page_alloc_dma.o
ttm-$(CONFIG_AGP) += ttm_agp_backend.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 6fe91c1b692d..a1d977fbade5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -409,8 +409,7 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
if (likely(node)) {
bo = container_of(node, struct ttm_buffer_object, vma_node);
- if (!kref_get_unless_zero(&bo->kref))
- bo = NULL;
+ bo = ttm_bo_get_unless_zero(bo);
}
drm_vma_offset_unlock_lookup(&bdev->vma_manager);
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 09b2aa08363e..8841bd30e1e5 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -7,6 +7,8 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
- vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o
+ vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
+ vmwgfx_validation.o \
+ ttm_object.o ttm_lock.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/vmwgfx/ttm_lock.c
index 20694b8a01ca..16b2083cb9d4 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_lock.c
@@ -29,13 +29,13 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_module.h>
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
-#include <linux/module.h>
+#include "ttm_lock.h"
+#include "ttm_object.h"
#define TTM_WRITE_LOCK_PENDING (1 << 0)
#define TTM_VT_LOCK_PENDING (1 << 1)
@@ -52,7 +52,6 @@ void ttm_lock_init(struct ttm_lock *lock)
lock->kill_takers = false;
lock->signal = SIGKILL;
}
-EXPORT_SYMBOL(ttm_lock_init);
void ttm_read_unlock(struct ttm_lock *lock)
{
@@ -61,7 +60,6 @@ void ttm_read_unlock(struct ttm_lock *lock)
wake_up_all(&lock->queue);
spin_unlock(&lock->lock);
}
-EXPORT_SYMBOL(ttm_read_unlock);
static bool __ttm_read_lock(struct ttm_lock *lock)
{
@@ -92,7 +90,6 @@ int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
wait_event(lock->queue, __ttm_read_lock(lock));
return ret;
}
-EXPORT_SYMBOL(ttm_read_lock);
static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
{
@@ -144,7 +141,6 @@ void ttm_write_unlock(struct ttm_lock *lock)
wake_up_all(&lock->queue);
spin_unlock(&lock->lock);
}
-EXPORT_SYMBOL(ttm_write_unlock);
static bool __ttm_write_lock(struct ttm_lock *lock)
{
@@ -185,7 +181,6 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
return ret;
}
-EXPORT_SYMBOL(ttm_write_lock);
static int __ttm_vt_unlock(struct ttm_lock *lock)
{
@@ -262,14 +257,12 @@ int ttm_vt_lock(struct ttm_lock *lock,
return ret;
}
-EXPORT_SYMBOL(ttm_vt_lock);
int ttm_vt_unlock(struct ttm_lock *lock)
{
return ttm_ref_object_base_unref(lock->vt_holder,
- lock->base.hash.key, TTM_REF_USAGE);
+ lock->base.handle, TTM_REF_USAGE);
}
-EXPORT_SYMBOL(ttm_vt_unlock);
void ttm_suspend_unlock(struct ttm_lock *lock)
{
@@ -278,7 +271,6 @@ void ttm_suspend_unlock(struct ttm_lock *lock)
wake_up_all(&lock->queue);
spin_unlock(&lock->lock);
}
-EXPORT_SYMBOL(ttm_suspend_unlock);
static bool __ttm_suspend_lock(struct ttm_lock *lock)
{
@@ -300,4 +292,3 @@ void ttm_suspend_lock(struct ttm_lock *lock)
{
wait_event(lock->queue, __ttm_suspend_lock(lock));
}
-EXPORT_SYMBOL(ttm_suspend_lock);
diff --git a/drivers/gpu/drm/vmwgfx/ttm_lock.h b/drivers/gpu/drm/vmwgfx/ttm_lock.h
new file mode 100644
index 000000000000..0c3af9836863
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/ttm_lock.h
@@ -0,0 +1,248 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+/** @file ttm_lock.h
+ * This file implements a simple replacement for the buffer manager use
+ * of the DRM heavyweight hardware lock.
+ * The lock is a read-write lock. Taking it in read mode and write mode
+ * is relatively fast, and intended for in-kernel use only.
+ *
+ * The vt mode is used only when there is a need to block all
+ * user-space processes from validating buffers.
+ * It's allowed to leave kernel space with the vt lock held.
+ * If a user-space process dies while having the vt-lock,
+ * it will be released during the file descriptor release. The vt lock
+ * excludes write lock and read lock.
+ *
+ * The suspend mode is used to lock out all TTM users when preparing for
+ * and executing suspend operations.
+ *
+ */
+
+#ifndef _TTM_LOCK_H_
+#define _TTM_LOCK_H_
+
+#include <linux/wait.h>
+#include <linux/atomic.h>
+
+#include "ttm_object.h"
+
+/**
+ * struct ttm_lock
+ *
+ * @base: ttm base object used solely to release the lock if the client
+ * holding the lock dies.
+ * @queue: Queue for processes waiting for lock change-of-status.
+ * @lock: Spinlock protecting some lock members.
+ * @rw: Read-write lock counter. Protected by @lock.
+ * @flags: Lock state. Protected by @lock.
+ * @kill_takers: Boolean whether to kill takers of the lock.
+ * @signal: Signal to send when kill_takers is true.
+ */
+
+struct ttm_lock {
+ struct ttm_base_object base;
+ wait_queue_head_t queue;
+ spinlock_t lock;
+ int32_t rw;
+ uint32_t flags;
+ bool kill_takers;
+ int signal;
+ struct ttm_object_file *vt_holder;
+};
+
+
+/**
+ * ttm_lock_init
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * Initializes the lock.
+ */
+extern void ttm_lock_init(struct ttm_lock *lock);
+
+/**
+ * ttm_read_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a read lock.
+ */
+extern void ttm_read_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_read_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in read mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_read_trylock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Tries to take the lock in read mode. If the lock is already held
+ * in write mode, the function will return -EBUSY. If the lock is held
+ * in vt or suspend mode, the function will sleep until these modes
+ * are unlocked.
+ *
+ * Returns:
+ * -EBUSY The lock was already held in write mode.
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_write_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a write lock.
+ */
+extern void ttm_write_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_write_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in write mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_lock_downgrade
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Downgrades a write lock to a read lock.
+ */
+extern void ttm_lock_downgrade(struct ttm_lock *lock);
+
+/**
+ * ttm_suspend_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Takes the lock in suspend mode. Excludes read and write mode.
+ */
+extern void ttm_suspend_lock(struct ttm_lock *lock);
+
+/**
+ * ttm_suspend_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a suspend lock
+ */
+extern void ttm_suspend_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_vt_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ * @tfile: Pointer to a struct ttm_object_file to register the lock with.
+ *
+ * Takes the lock in vt mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ * -ENOMEM: Out of memory when locking.
+ */
+extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible,
+ struct ttm_object_file *tfile);
+
+/**
+ * ttm_vt_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a vt lock.
+ * Returns:
+ * -EINVAL If the lock was not held.
+ */
+extern int ttm_vt_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_write_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a write lock.
+ */
+extern void ttm_write_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_write_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in write mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_lock_set_kill
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @val: Boolean whether to kill processes taking the lock.
+ * @signal: Signal to send to the process taking the lock.
+ *
+ * The kill-when-taking-lock functionality is used to kill processes that keep
+ * on using the TTM functionality when its resources have been taken down, for
+ * example when the X server exits. A typical sequence would look like this:
+ * - X server takes lock in write mode.
+ * - ttm_lock_set_kill() is called with @val set to true.
+ * - As part of X server exit, TTM resources are taken down.
+ * - X server releases the lock on file release.
+ * - Another dri client wants to render, takes the lock and is killed.
+ *
+ */
+static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val,
+ int signal)
+{
+ lock->kill_takers = val;
+ if (val)
+ lock->signal = signal;
+}
+
+#endif
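
The header above only declares the lock API; as a quick orientation, here is a minimal usage sketch of the read-lock path (not part of the patch), assuming a caller that already owns a struct ttm_lock such as vmwgfx's dev_priv->reservation_sem. The function name is invented for illustration.

static int example_read_locked_validation(struct ttm_lock *lock)
{
	int ret;

	/* Sleeps interruptibly until read mode can be taken. */
	ret = ttm_read_lock(lock, true);
	if (unlikely(ret != 0))
		return ret;	/* -ERESTARTSYS if a signal arrived */

	/* ... validate buffers; vt/suspend holders are excluded here ... */

	ttm_read_unlock(lock);
	return 0;
}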
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
index 74f1b1eb1f8e..36990b80e790 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -59,13 +59,12 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include <linux/atomic.h>
+#include "ttm_object.h"
struct ttm_object_file {
struct ttm_object_device *tdev;
@@ -95,6 +94,7 @@ struct ttm_object_device {
struct dma_buf_ops ops;
void (*dmabuf_release)(struct dma_buf *dma_buf);
size_t dma_buf_size;
+ struct idr idr;
};
/**
@@ -172,14 +172,15 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
base->ref_obj_release = ref_obj_release;
base->object_type = object_type;
kref_init(&base->refcount);
+ idr_preload(GFP_KERNEL);
spin_lock(&tdev->object_lock);
- ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
- &base->hash,
- (unsigned long)base, 31, 0, 0);
+ ret = idr_alloc(&tdev->idr, base, 0, 0, GFP_NOWAIT);
spin_unlock(&tdev->object_lock);
- if (unlikely(ret != 0))
- goto out_err0;
+ idr_preload_end();
+ if (ret < 0)
+ return ret;
+ base->handle = ret;
ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
if (unlikely(ret != 0))
goto out_err1;
@@ -189,12 +190,10 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
return 0;
out_err1:
spin_lock(&tdev->object_lock);
- (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+ idr_remove(&tdev->idr, base->handle);
spin_unlock(&tdev->object_lock);
-out_err0:
return ret;
}
-EXPORT_SYMBOL(ttm_base_object_init);
static void ttm_release_base(struct kref *kref)
{
@@ -203,7 +202,7 @@ static void ttm_release_base(struct kref *kref)
struct ttm_object_device *tdev = base->tfile->tdev;
spin_lock(&tdev->object_lock);
- (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+ idr_remove(&tdev->idr, base->handle);
spin_unlock(&tdev->object_lock);
/*
@@ -225,7 +224,41 @@ void ttm_base_object_unref(struct ttm_base_object **p_base)
kref_put(&base->refcount, ttm_release_base);
}
-EXPORT_SYMBOL(ttm_base_object_unref);
+
+/**
+ * ttm_base_object_noref_lookup - look up a base object without reference
+ * @tfile: The struct ttm_object_file the object is registered with.
+ * @key: The object handle.
+ *
+ * This function looks up a ttm base object and returns a pointer to it
+ * without refcounting the pointer. The returned pointer is only valid
+ * until ttm_base_object_noref_release() is called, and the object
+ * pointed to by the returned pointer may be doomed. Any persistent usage
+ * of the object requires a refcount to be taken using kref_get_unless_zero().
+ * Iff this function returns successfully it needs to be paired with
+ * ttm_base_object_noref_release() and no sleeping- or scheduling functions
+ * may be called in between these function calls.
+ *
+ * Return: A pointer to the object if successful or NULL otherwise.
+ */
+struct ttm_base_object *
+ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key)
+{
+ struct drm_hash_item *hash;
+ struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
+ int ret;
+
+ rcu_read_lock();
+ ret = drm_ht_find_item_rcu(ht, key, &hash);
+ if (ret) {
+ rcu_read_unlock();
+ return NULL;
+ }
+
+ __release(RCU);
+ return drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
+}
+EXPORT_SYMBOL(ttm_base_object_noref_lookup);
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
uint32_t key)
@@ -247,29 +280,21 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
return base;
}
-EXPORT_SYMBOL(ttm_base_object_lookup);
struct ttm_base_object *
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
{
- struct ttm_base_object *base = NULL;
- struct drm_hash_item *hash;
- struct drm_open_hash *ht = &tdev->object_hash;
- int ret;
+ struct ttm_base_object *base;
rcu_read_lock();
- ret = drm_ht_find_item_rcu(ht, key, &hash);
+ base = idr_find(&tdev->idr, key);
- if (likely(ret == 0)) {
- base = drm_hash_entry(hash, struct ttm_base_object, hash);
- if (!kref_get_unless_zero(&base->refcount))
- base = NULL;
- }
+ if (base && !kref_get_unless_zero(&base->refcount))
+ base = NULL;
rcu_read_unlock();
return base;
}
-EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);
/**
* ttm_ref_object_exists - Check whether a caller has a valid ref object
@@ -289,7 +314,7 @@ bool ttm_ref_object_exists(struct ttm_object_file *tfile,
struct ttm_ref_object *ref;
rcu_read_lock();
- if (unlikely(drm_ht_find_item_rcu(ht, base->hash.key, &hash) != 0))
+ if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0))
goto out_false;
/*
@@ -315,7 +340,6 @@ bool ttm_ref_object_exists(struct ttm_object_file *tfile,
rcu_read_unlock();
return false;
}
-EXPORT_SYMBOL(ttm_ref_object_exists);
int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
@@ -340,7 +364,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
while (ret == -EINVAL) {
rcu_read_lock();
- ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);
+ ret = drm_ht_find_item_rcu(ht, base->handle, &hash);
if (ret == 0) {
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
@@ -364,7 +388,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
return -ENOMEM;
}
- ref->hash.key = base->hash.key;
+ ref->hash.key = base->handle;
ref->obj = base;
ref->tfile = tfile;
ref->ref_type = ref_type;
@@ -391,9 +415,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
return ret;
}
-EXPORT_SYMBOL(ttm_ref_object_add);
-static void ttm_ref_object_release(struct kref *kref)
+static void __releases(tfile->lock) __acquires(tfile->lock)
+ttm_ref_object_release(struct kref *kref)
{
struct ttm_ref_object *ref =
container_of(kref, struct ttm_ref_object, kref);
@@ -435,7 +459,6 @@ int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
spin_unlock(&tfile->lock);
return 0;
}
-EXPORT_SYMBOL(ttm_ref_object_base_unref);
void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
@@ -464,7 +487,6 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
ttm_object_file_unref(&tfile);
}
-EXPORT_SYMBOL(ttm_object_file_release);
struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
unsigned int hash_order)
@@ -499,7 +521,6 @@ out_err:
return NULL;
}
-EXPORT_SYMBOL(ttm_object_file_init);
struct ttm_object_device *
ttm_object_device_init(struct ttm_mem_global *mem_glob,
@@ -519,6 +540,7 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob,
if (ret != 0)
goto out_no_object_hash;
+ idr_init(&tdev->idr);
tdev->ops = *ops;
tdev->dmabuf_release = tdev->ops.release;
tdev->ops.release = ttm_prime_dmabuf_release;
@@ -530,7 +552,6 @@ out_no_object_hash:
kfree(tdev);
return NULL;
}
-EXPORT_SYMBOL(ttm_object_device_init);
void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
@@ -538,11 +559,12 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
*p_tdev = NULL;
+ WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
+ idr_destroy(&tdev->idr);
drm_ht_remove(&tdev->object_hash);
kfree(tdev);
}
-EXPORT_SYMBOL(ttm_object_device_release);
/**
* get_dma_buf_unless_doomed - get a dma_buf reference if possible.
@@ -641,14 +663,13 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
prime = (struct ttm_prime_object *) dma_buf->priv;
base = &prime->base;
- *handle = base->hash.key;
+ *handle = base->handle;
ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
dma_buf_put(dma_buf);
return ret;
}
-EXPORT_SYMBOL_GPL(ttm_prime_fd_to_handle);
/**
* ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
@@ -739,7 +760,6 @@ out_unref:
ttm_base_object_unref(&base);
return ret;
}
-EXPORT_SYMBOL_GPL(ttm_prime_handle_to_fd);
/**
* ttm_prime_object_init - Initialize a ttm_prime_object
@@ -772,4 +792,3 @@ int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
ttm_prime_refcount_release,
ref_obj_release);
}
-EXPORT_SYMBOL(ttm_prime_object_init);
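
For reference, the hash-table-to-idr conversion above follows the standard idr preload pattern so that no allocation has to sleep while tdev->object_lock is held. A stand-alone sketch of that pattern (illustrative only; the helper name is made up):

#include <linux/idr.h>
#include <linux/spinlock.h>

static int example_alloc_handle(struct idr *idr, spinlock_t *lock, void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);		/* may sleep: preallocate idr memory */
	spin_lock(lock);
	id = idr_alloc(idr, obj, 0, 0, GFP_NOWAIT);	/* must not sleep under the lock */
	spin_unlock(lock);
	idr_preload_end();

	return id;				/* handle >= 0, or negative errno */
}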
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.h b/drivers/gpu/drm/vmwgfx/ttm_object.h
new file mode 100644
index 000000000000..50d26c7ff42d
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.h
@@ -0,0 +1,375 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/** @file ttm_object.h
+ *
+ * Base- and reference object implementation for the various
+ * ttm objects. Implements reference counting, minimal security checks
+ * and release on file close.
+ */
+
+#ifndef _TTM_OBJECT_H_
+#define _TTM_OBJECT_H_
+
+#include <linux/list.h>
+#include <drm/drm_hashtab.h>
+#include <linux/kref.h>
+#include <linux/rcupdate.h>
+#include <linux/dma-buf.h>
+#include <drm/ttm/ttm_memory.h>
+
+/**
+ * enum ttm_ref_type
+ *
+ * Describes what type of reference a ref object holds.
+ *
+ * TTM_REF_USAGE is a simple refcount on a base object.
+ *
+ * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
+ * buffer object.
+ *
+ * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
+ * buffer object.
+ *
+ */
+
+enum ttm_ref_type {
+ TTM_REF_USAGE,
+ TTM_REF_SYNCCPU_READ,
+ TTM_REF_SYNCCPU_WRITE,
+ TTM_REF_NUM
+};
+
+/**
+ * enum ttm_object_type
+ *
+ * One entry per ttm object type.
+ * Device-specific types should use the
+ * ttm_driver_typex types.
+ */
+
+enum ttm_object_type {
+ ttm_fence_type,
+ ttm_buffer_type,
+ ttm_lock_type,
+ ttm_prime_type,
+ ttm_driver_type0 = 256,
+ ttm_driver_type1,
+ ttm_driver_type2,
+ ttm_driver_type3,
+ ttm_driver_type4,
+ ttm_driver_type5
+};
+
+struct ttm_object_file;
+struct ttm_object_device;
+
+/**
+ * struct ttm_base_object
+ *
+ * @hash: hash entry for the per-device object hash.
+ * @type: derived type this object is base class for.
+ * @shareable: Other ttm_object_files can access this object.
+ *
+ * @tfile: Pointer to ttm_object_file of the creator.
+ * NULL if the object was not created by a user request.
+ * (kernel object).
+ *
+ * @refcount: Number of references to this object, not
+ * including the hash entry. A reference to a base object can
+ * only be held by a ref object.
+ *
+ * @refcount_release: A function to be called when there are
+ * no more references to this object. This function should
+ * destroy the object (or make sure destruction eventually happens),
+ * and when it is called, the object has
+ * already been taken out of the per-device hash. The parameter
+ * "base" should be set to NULL by the function.
+ *
+ * @ref_obj_release: A function to be called when a reference object
+ * with another ttm_ref_type than TTM_REF_USAGE is deleted.
+ * This function may, for example, release a lock held by a user-space
+ * process.
+ *
+ * This struct is intended to be used as a base struct for objects that
+ * are visible to user-space. It provides a global name, race-safe
+ * access and refcounting, minimal access control and hooks for unref actions.
+ */
+
+struct ttm_base_object {
+ struct rcu_head rhead;
+ struct ttm_object_file *tfile;
+ struct kref refcount;
+ void (*refcount_release) (struct ttm_base_object **base);
+ void (*ref_obj_release) (struct ttm_base_object *base,
+ enum ttm_ref_type ref_type);
+ u32 handle;
+ enum ttm_object_type object_type;
+ u32 shareable;
+};
+
+
+/**
+ * struct ttm_prime_object - Modified base object that is prime-aware
+ *
+ * @base: struct ttm_base_object that we derive from
+ * @mutex: Mutex protecting the @dma_buf member.
+ * @size: Size of the dma_buf associated with this object
+ * @real_type: Type of the underlying object. Needed since we're setting
+ * the value of @base::object_type to ttm_prime_type
+ * @dma_buf: Non ref-counted pointer to a struct dma_buf created from this
+ * object.
+ * @refcount_release: The underlying object's release method. Needed since
+ * we set @base::refcount_release to our own release method.
+ */
+
+struct ttm_prime_object {
+ struct ttm_base_object base;
+ struct mutex mutex;
+ size_t size;
+ enum ttm_object_type real_type;
+ struct dma_buf *dma_buf;
+ void (*refcount_release) (struct ttm_base_object **);
+};
+
+/**
+ * ttm_base_object_init
+ *
+ * @tfile: Pointer to a struct ttm_object_file.
+ * @base: The struct ttm_base_object to initialize.
+ * @shareable: This object is shareable with other applications.
+ * (different @tfile pointers.)
+ * @type: The object type.
+ * @refcount_release: See the struct ttm_base_object description.
+ * @ref_obj_release: See the struct ttm_base_object description.
+ *
+ * Initializes a struct ttm_base_object.
+ */
+
+extern int ttm_base_object_init(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ bool shareable,
+ enum ttm_object_type type,
+ void (*refcount_release) (struct ttm_base_object
+ **),
+ void (*ref_obj_release) (struct ttm_base_object
+ *,
+ enum ttm_ref_type
+ ref_type));
+
+/**
+ * ttm_base_object_lookup
+ *
+ * @tfile: Pointer to a struct ttm_object_file.
+ * @key: Hash key
+ *
+ * Looks up a struct ttm_base_object with the key @key.
+ */
+
+extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
+ *tfile, uint32_t key);
+
+/**
+ * ttm_base_object_lookup_for_ref
+ *
+ * @tdev: Pointer to a struct ttm_object_device.
+ * @key: Hash key
+ *
+ * Looks up a struct ttm_base_object with the key @key.
+ * This function should only be used when the struct tfile associated with the
+ * caller doesn't yet have a reference to the base object.
+ */
+
+extern struct ttm_base_object *
+ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key);
+
+/**
+ * ttm_base_object_unref
+ *
+ * @p_base: Pointer to a pointer referencing a struct ttm_base_object.
+ *
+ * Decrements the base object refcount and clears the pointer pointed to by
+ * p_base.
+ */
+
+extern void ttm_base_object_unref(struct ttm_base_object **p_base);
+
+/**
+ * ttm_ref_object_add.
+ *
+ * @tfile: A struct ttm_object_file representing the application owning the
+ * ref_object.
+ * @base: The base object to reference.
+ * @ref_type: The type of reference.
+ * @existed: Upon completion, indicates that an identical reference object
+ * already existed, and the refcount was upped on that object instead.
+ * @require_existed: Fail with -EPERM if an identical ref object didn't
+ * already exist.
+ *
+ * Checks that the base object is shareable and adds a ref object to it.
+ *
+ * Adding a ref object to a base object is basically like referencing the
+ * base object, but a user-space application holds the reference. When the
+ * file corresponding to @tfile is closed, all its reference objects are
+ * deleted. A reference object can have different types depending on what
+ * it's intended for. It can be refcounting to prevent object destruction.
+ * When user-space takes a lock, it can add a ref object to that lock to
+ * make sure the lock is released if the application dies. A ref object
+ * will hold a single reference on a base object.
+ */
+extern int ttm_ref_object_add(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ enum ttm_ref_type ref_type, bool *existed,
+ bool require_existed);
+
+extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
+ struct ttm_base_object *base);
+
+/**
+ * ttm_ref_object_base_unref
+ *
+ * @key: Key representing the base object.
+ * @ref_type: Ref type of the ref object to be dereferenced.
+ *
+ * Unreference a ref object with type @ref_type
+ * on the base object identified by @key. If there are no duplicate
+ * references, the ref object will be destroyed and the base object
+ * will be unreferenced.
+ */
+extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
+ unsigned long key,
+ enum ttm_ref_type ref_type);
+
+/**
+ * ttm_object_file_init - initialize a struct ttm_object file
+ *
+ * @tdev: A struct ttm_object device this file is initialized on.
+ * @hash_order: Order of the hash table used to hold the reference objects.
+ *
+ * This is typically called by the file_ops::open function.
+ */
+
+extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
+ *tdev,
+ unsigned int hash_order);
+
+/**
+ * ttm_object_file_release - release data held by a ttm_object_file
+ *
+ * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
+ * *p_tfile will be set to NULL by this function.
+ *
+ * Releases all data associated by a ttm_object_file.
+ * Typically called from file_ops::release. The caller must
+ * ensure that there are no concurrent users of tfile.
+ */
+
+extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
+
+/**
+ * ttm_object device init - initialize a struct ttm_object_device
+ *
+ * @mem_glob: struct ttm_mem_global for memory accounting.
+ * @hash_order: Order of hash table used to hash the base objects.
+ * @ops: DMA buf ops for prime objects of this device.
+ *
+ * This function is typically called on device initialization to prepare
+ * data structures needed for ttm base and ref objects.
+ */
+
+extern struct ttm_object_device *
+ttm_object_device_init(struct ttm_mem_global *mem_glob,
+ unsigned int hash_order,
+ const struct dma_buf_ops *ops);
+
+/**
+ * ttm_object_device_release - release data held by a ttm_object_device
+ *
+ * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
+ * *p_tdev will be set to NULL by this function.
+ *
+ * Releases all data associated by a ttm_object_device.
+ * Typically called from driver::unload before the destruction of the
+ * device private data structure.
+ */
+
+extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
+
+#define ttm_base_object_kfree(__object, __base)\
+ kfree_rcu(__object, __base.rhead)
+
+extern int ttm_prime_object_init(struct ttm_object_file *tfile,
+ size_t size,
+ struct ttm_prime_object *prime,
+ bool shareable,
+ enum ttm_object_type type,
+ void (*refcount_release)
+ (struct ttm_base_object **),
+ void (*ref_obj_release)
+ (struct ttm_base_object *,
+ enum ttm_ref_type ref_type));
+
+static inline enum ttm_object_type
+ttm_base_object_type(struct ttm_base_object *base)
+{
+ return (base->object_type == ttm_prime_type) ?
+ container_of(base, struct ttm_prime_object, base)->real_type :
+ base->object_type;
+}
+extern int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
+ int fd, u32 *handle);
+extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
+ uint32_t handle, uint32_t flags,
+ int *prime_fd);
+
+#define ttm_prime_object_kfree(__obj, __prime) \
+ kfree_rcu(__obj, __prime.base.rhead)
+
+/*
+ * Extra memory required by the base object's idr storage, which is allocated
+ * separately from the base object itself. We estimate an on-average 128 bytes
+ * per idr.
+ */
+#define TTM_OBJ_EXTRA_SIZE 128
+
+struct ttm_base_object *
+ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key);
+
+/**
+ * ttm_base_object_noref_release - release a base object pointer looked up
+ * without reference
+ *
+ * Releases a base object pointer looked up with ttm_base_object_noref_lookup().
+ */
+static inline void ttm_base_object_noref_release(void)
+{
+ __acquire(RCU);
+ rcu_read_unlock();
+}
+#endif
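
A usage sketch of the noref-lookup contract documented above (not from the patch): the lookup leaves the RCU read-side critical section held, so the caller must not sleep and must pair it with ttm_base_object_noref_release(); any longer-lived use needs kref_get_unless_zero(). The wrapper name is hypothetical.

static struct ttm_base_object *
example_lookup_and_ref(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base)
		return NULL;

	/* Upgrade to a real reference before leaving the noref window. */
	if (!kref_get_unless_zero(&base->refcount))
		base = NULL;	/* object is being destroyed */

	ttm_base_object_noref_release();
	return base;
}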
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 2dda03345761..7ce1c2f87d9a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -30,7 +30,7 @@
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
-#include "drm/ttm/ttm_object.h"
+#include "ttm_object.h"
/**
@@ -441,7 +441,8 @@ static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
struct_size = backend_size +
ttm_round_pot(sizeof(struct vmw_buffer_object));
user_struct_size = backend_size +
- ttm_round_pot(sizeof(struct vmw_user_buffer_object));
+ ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
+ TTM_OBJ_EXTRA_SIZE;
}
if (dev_priv->map_mode == vmw_dma_alloc_coherent)
@@ -631,7 +632,7 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv,
*p_base = &user_bo->prime.base;
kref_get(&(*p_base)->refcount);
}
- *handle = user_bo->prime.base.hash.key;
+ *handle = user_bo->prime.base.handle;
out_no_base_object:
return ret;
@@ -920,6 +921,47 @@ int vmw_user_bo_lookup(struct ttm_object_file *tfile,
return 0;
}
+/**
+ * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
+ * @tfile: The TTM object file the handle is registered with.
+ * @handle: The user buffer object handle.
+ *
+ * This function looks up a struct vmw_user_bo and returns a pointer to the
+ * struct vmw_buffer_object it derives from without refcounting the pointer.
+ * The returned pointer is only valid until vmw_user_bo_noref_release() is
+ * called, and the object pointed to by the returned pointer may be doomed.
+ * Any persistent usage of the object requires a refcount to be taken using
+ * ttm_bo_reference_unless_doomed(). Iff this function returns successfully it
+ * needs to be paired with vmw_user_bo_noref_release() and no sleeping-
+ * or scheduling functions may be called inbetween these function calls.
+ *
+ * Return: A struct vmw_buffer_object pointer if successful or negative
+ * error pointer on failure.
+ */
+struct vmw_buffer_object *
+vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
+{
+ struct vmw_user_buffer_object *vmw_user_bo;
+ struct ttm_base_object *base;
+
+ base = ttm_base_object_noref_lookup(tfile, handle);
+ if (!base) {
+ DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return ERR_PTR(-ESRCH);
+ }
+
+ if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
+ ttm_base_object_noref_release();
+ DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return ERR_PTR(-EINVAL);
+ }
+
+ vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
+ prime.base);
+ return &vmw_user_bo->vbo;
+}
/**
* vmw_user_bo_reference - Open a handle to a vmw user buffer object.
@@ -940,7 +982,7 @@ int vmw_user_bo_reference(struct ttm_object_file *tfile,
user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
- *handle = user_bo->prime.base.hash.key;
+ *handle = user_bo->prime.base.handle;
return ttm_ref_object_add(tfile, &user_bo->prime.base,
TTM_REF_USAGE, NULL, false);
}
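
The same pairing rule applies to the vmwgfx wrapper added above. A hypothetical caller during command-stream validation might look like this (sketch only, assuming the handle comes from user-space):

static int example_reference_user_bo(struct ttm_object_file *tfile, u32 handle)
{
	struct vmw_buffer_object *vbo;

	vbo = vmw_user_bo_noref_lookup(tfile, handle);
	if (IS_ERR(vbo))
		return PTR_ERR(vbo);

	/* ... record @vbo on a validation list; no sleeping in this window ... */

	vmw_user_bo_noref_release();
	return 0;
}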
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index e7e4655d3f36..48d1380a952e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -660,7 +660,7 @@ static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
struct vmw_cmdbuf_header *cur = man->cur;
- WARN_ON(!mutex_is_locked(&man->cur_mutex));
+ lockdep_assert_held_once(&man->cur_mutex);
if (!cur)
return;
@@ -1045,7 +1045,7 @@ static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
{
struct vmw_cmdbuf_header *cur = man->cur;
- WARN_ON(!mutex_is_locked(&man->cur_mutex));
+ lockdep_assert_held_once(&man->cur_mutex);
WARN_ON(size > cur->reserved);
man->cur_pos += size;
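
For context on the assertion change above: lockdep_assert_held_once() checks that the current task actually holds the mutex and compiles away when CONFIG_LOCKDEP is off, whereas the old WARN_ON(!mutex_is_locked(...)) only verified that somebody held it and always ran at runtime. A minimal sketch of the intended pattern (function name invented):

static void example_touch_cur_state(struct vmw_cmdbuf_man *man)
{
	lockdep_assert_held_once(&man->cur_mutex);

	/* Safe: the state hanging off man->cur is protected by cur_mutex. */
}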
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 3b75af9bf85f..4ac55fc2bf97 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -89,8 +89,7 @@ vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
if (unlikely(ret != 0))
return ERR_PTR(ret);
- return vmw_resource_reference
- (drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res);
+ return drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res;
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 7c3cb8efd11a..14bd760a62fd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -217,9 +217,7 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
}
}
-
-
- vmw_resource_activate(res, vmw_hw_context_destroy);
+ res->hw_destroy = vmw_hw_context_destroy;
return 0;
out_cotables:
@@ -274,7 +272,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
vmw_fifo_commit(dev_priv, sizeof(*cmd));
vmw_fifo_resource_inc(dev_priv);
- vmw_resource_activate(res, vmw_hw_context_destroy);
+ res->hw_destroy = vmw_hw_context_destroy;
return 0;
out_early:
@@ -757,14 +755,10 @@ static int vmw_context_define(struct drm_device *dev, void *data,
return -EINVAL;
}
- /*
- * Approximate idr memory usage with 128 bytes. It will be limited
- * by maximum number_of contexts anyway.
- */
-
if (unlikely(vmw_user_context_size == 0))
- vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
- ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);
+ vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
+ ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
+ + VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
@@ -809,7 +803,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
goto out_err;
}
- arg->cid = ctx->base.hash.key;
+ arg->cid = ctx->base.handle;
out_err:
vmw_resource_unreference(&res);
out_unlock:
@@ -867,9 +861,8 @@ struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
if (cotable_type >= SVGA_COTABLE_DX10_MAX)
return ERR_PTR(-EINVAL);
- return vmw_resource_reference
- (container_of(ctx, struct vmw_user_context, res)->
- cotables[cotable_type]);
+ return container_of(ctx, struct vmw_user_context, res)->
+ cotables[cotable_type];
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 1d45714e1d5a..44f3f6f107d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -615,7 +615,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
vcotbl->type = type;
vcotbl->ctx = ctx;
- vmw_resource_activate(&vcotbl->res, vmw_hw_cotable_destroy);
+ vcotbl->res.hw_destroy = vmw_hw_cotable_destroy;
return &vcotbl->res;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index bb6dbbe18835..61a84b958d67 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -30,9 +30,9 @@
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
+#include "ttm_object.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>
@@ -667,8 +667,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
mutex_init(&dev_priv->binding_mutex);
mutex_init(&dev_priv->requested_layout_mutex);
mutex_init(&dev_priv->global_kms_state_mutex);
- rwlock_init(&dev_priv->resource_lock);
ttm_lock_init(&dev_priv->reservation_sem);
+ spin_lock_init(&dev_priv->resource_lock);
spin_lock_init(&dev_priv->hw_lock);
spin_lock_init(&dev_priv->waiter_lock);
spin_lock_init(&dev_priv->cap_lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 1abe21758b0d..59f614225bcd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -28,6 +28,7 @@
#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_
+#include "vmwgfx_validation.h"
#include "vmwgfx_reg.h"
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
@@ -35,11 +36,11 @@
#include <drm/drm_auth.h>
#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_object.h>
-#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
+#include "ttm_object.h"
+#include "ttm_lock.h"
#include <linux/sync_file.h>
#define VMWGFX_DRIVER_NAME "vmwgfx"
@@ -112,21 +113,49 @@ struct vmw_validate_buffer {
};
struct vmw_res_func;
+
+
+/**
+ * struct vmw_resource - base class for hardware resources
+ *
+ * @kref: For refcounting.
+ * @dev_priv: Pointer to the device private for this resource. Immutable.
+ * @id: Device id. Protected by @dev_priv::resource_lock.
+ * @backup_size: Backup buffer size. Immutable.
+ * @res_dirty: Resource contains data not yet in the backup buffer. Protected
+ * by resource reserved.
+ * @backup_dirty: Backup buffer contains data not yet in the HW resource.
+ * Protected by resource reserved.
+ * @backup: The backup buffer if any. Protected by resource reserved.
+ * @backup_offset: Offset into the backup buffer if any. Protected by resource
+ * reserved. Note that only a few resource types can have a @backup_offset
+ * different from zero.
+ * @pin_count: The pin count for this resource. A pinned resource has a
+ * pin-count greater than zero. It is not on the resource LRU lists and its
+ * backup buffer is pinned. Hence it can't be evicted.
+ * @func: Method vtable for this resource. Immutable.
+ * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
+ * @mob_head: List head for the MOB backup list. Protected by @backup reserved.
+ * @binding_head: List head for the context binding list. Protected by
+ * the @dev_priv::binding_mutex
+ * @res_free: The resource destructor.
+ * @hw_destroy: Callback to destroy the resource on the device, as part of
+ * resource destruction.
+ */
struct vmw_resource {
struct kref kref;
struct vmw_private *dev_priv;
int id;
- bool avail;
unsigned long backup_size;
- bool res_dirty; /* Protected by backup buffer reserved */
- bool backup_dirty; /* Protected by backup buffer reserved */
+ bool res_dirty;
+ bool backup_dirty;
struct vmw_buffer_object *backup;
unsigned long backup_offset;
- unsigned long pin_count; /* Protected by resource reserved */
+ unsigned long pin_count;
const struct vmw_res_func *func;
- struct list_head lru_head; /* Protected by the resource lock */
- struct list_head mob_head; /* Protected by @backup reserved */
- struct list_head binding_head; /* Protected by binding_mutex */
+ struct list_head lru_head;
+ struct list_head mob_head;
+ struct list_head binding_head;
void (*res_free) (struct vmw_resource *res);
void (*hw_destroy) (struct vmw_resource *res);
};
@@ -204,29 +233,24 @@ struct vmw_fifo_state {
bool dx;
};
-struct vmw_relocation {
- SVGAMobId *mob_loc;
- SVGAGuestPtr *location;
- uint32_t index;
-};
-
/**
* struct vmw_res_cache_entry - resource information cache entry
- *
+ * @handle: User-space handle of a resource.
+ * @res: Non-ref-counted pointer to the resource.
+ * @valid_handle: Whether the @handle member is valid.
* @valid: Whether the entry is valid, which also implies that the execbuf
* code holds a reference to the resource, and it's placed on the
* validation list.
- * @handle: User-space handle of a resource.
- * @res: Non-ref-counted pointer to the resource.
*
* Used to avoid frequent repeated user-space handle lookups of the
* same resource.
*/
struct vmw_res_cache_entry {
- bool valid;
uint32_t handle;
struct vmw_resource *res;
- struct vmw_resource_val_node *node;
+ void *private;
+ unsigned short valid_handle;
+ unsigned short valid;
};
/**
@@ -291,35 +315,63 @@ enum vmw_display_unit_type {
vmw_du_screen_target
};
+struct vmw_validation_context;
+struct vmw_ctx_validation_info;
+/**
+ * struct vmw_sw_context - Command submission context
+ * @res_ht: Pointer hash table used to find validation duplicates
+ * @kernel: Whether the command buffer originates from kernel code rather
+ * than from user-space
+ * @fp: If @kernel is false, points to the file of the client. Otherwise
+ * NULL
+ * @cmd_bounce: Command bounce buffer used for command validation before
+ * copying to fifo space
+ * @cmd_bounce_size: Current command bounce buffer size
+ * @cur_query_bo: Current buffer object used as query result buffer
+ * @bo_relocations: List of buffer object relocations
+ * @res_relocations: List of resource relocations
+ * @buf_start: Pointer to start of memory where command validation takes
+ * place
+ * @res_cache: Cache of recently looked up resources
+ * @last_query_ctx: Last context that submitted a query
+ * @needs_post_query_barrier: Whether a query barrier is needed after
+ * command submission
+ * @staged_bindings: Cached per-context binding tracker
+ * @staged_bindings_inuse: Whether the cached per-context binding tracker
+ * is in use
+ * @staged_cmd_res: List of staged command buffer managed resources in this
+ * command buffer
+ * @ctx_list: List of context resources referenced in this command buffer
+ * @dx_ctx_node: Validation metadata of the current DX context
+ * @dx_query_mob: The MOB used for DX queries
+ * @dx_query_ctx: The DX context used for the last DX query
+ * @man: Pointer to the command buffer managed resource manager
+ * @ctx: The validation context
+ */
struct vmw_sw_context{
struct drm_open_hash res_ht;
bool res_ht_initialized;
- bool kernel; /**< is the called made from the kernel */
+ bool kernel;
struct vmw_fpriv *fp;
- struct list_head validate_nodes;
- struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
- uint32_t cur_reloc;
- struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
- uint32_t cur_val_buf;
uint32_t *cmd_bounce;
uint32_t cmd_bounce_size;
- struct list_head resource_list;
- struct list_head ctx_resource_list; /* For contexts and cotables */
struct vmw_buffer_object *cur_query_bo;
+ struct list_head bo_relocations;
struct list_head res_relocations;
uint32_t *buf_start;
struct vmw_res_cache_entry res_cache[vmw_res_max];
struct vmw_resource *last_query_ctx;
bool needs_post_query_barrier;
- struct vmw_resource *error_resource;
struct vmw_ctx_binding_state *staged_bindings;
bool staged_bindings_inuse;
struct list_head staged_cmd_res;
- struct vmw_resource_val_node *dx_ctx_node;
+ struct list_head ctx_list;
+ struct vmw_ctx_validation_info *dx_ctx_node;
struct vmw_buffer_object *dx_query_mob;
struct vmw_resource *dx_query_ctx;
struct vmw_cmdbuf_res_manager *man;
+ struct vmw_validation_context *ctx;
};
struct vmw_legacy_display;
@@ -444,7 +496,7 @@ struct vmw_private {
* Context and surface management.
*/
- rwlock_t resource_lock;
+ spinlock_t resource_lock;
struct idr res_idr[vmw_res_max];
/*
* Block lastclose from racing with firstopen.
@@ -628,7 +680,7 @@ extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
-extern int vmw_resource_validate(struct vmw_resource *res);
+extern int vmw_resource_validate(struct vmw_resource *res, bool intr);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
@@ -643,6 +695,12 @@ extern int vmw_user_resource_lookup_handle(
uint32_t handle,
const struct vmw_user_resource_conv *converter,
struct vmw_resource **p_res);
+extern struct vmw_resource *
+vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ const struct vmw_user_resource_conv *
+ converter);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -662,6 +720,15 @@ extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
/**
+ * vmw_user_resource_noref_release - release a user resource pointer looked up
+ * without reference
+ */
+static inline void vmw_user_resource_noref_release(void)
+{
+ ttm_base_object_noref_release();
+}
+
+/**
* Buffer object helper functions - vmwgfx_bo.c
*/
extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
@@ -717,6 +784,18 @@ extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
+extern struct vmw_buffer_object *
+vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle);
+
+/**
+ * vmw_user_bo_noref_release - release a buffer object pointer looked up
+ * without reference
+ */
+static inline void vmw_user_bo_noref_release(void)
+{
+ ttm_base_object_noref_release();
+}
+
/**
* Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -864,10 +943,6 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
uint32_t fence_handle,
int32_t out_fence_fd,
struct sync_file *sync_file);
-extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
- struct ttm_buffer_object *bo,
- bool interruptible,
- bool validate_as_mob);
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index f0ab6b2313bb..5a6b70ba137a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -35,6 +35,23 @@
#define VMW_RES_HT_ORDER 12
+/*
+ * struct vmw_relocation - Buffer object relocation
+ *
+ * @head: List head for the command submission context's relocation list
+ * @vbo: Non ref-counted pointer to buffer object
+ * @mob_loc: Pointer to location for mob id to be modified
+ * @location: Pointer to location for guest pointer to be modified
+ */
+struct vmw_relocation {
+ struct list_head head;
+ struct vmw_buffer_object *vbo;
+ union {
+ SVGAMobId *mob_loc;
+ SVGAGuestPtr *location;
+ };
+};
+
/**
* enum vmw_resource_relocation_type - Relocation type for resources
*
@@ -69,35 +86,18 @@ struct vmw_resource_relocation {
enum vmw_resource_relocation_type rel_type:3;
};
-/**
- * struct vmw_resource_val_node - Validation info for resources
- *
- * @head: List head for the software context's resource list.
- * @hash: Hash entry for quick resouce to val_node lookup.
- * @res: Ref-counted pointer to the resource.
- * @switch_backup: Boolean whether to switch backup buffer on unreserve.
- * @new_backup: Refcounted pointer to the new backup buffer.
- * @staged_bindings: If @res is a context, tracks bindings set up during
- * the command batch. Otherwise NULL.
- * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll.
- * @first_usage: Set to true the first time the resource is referenced in
- * the command stream.
- * @switching_backup: The command stream provides a new backup buffer for a
- * resource.
- * @no_buffer_needed: This means @switching_backup is true on first buffer
- * reference. So resource reservation does not need to allocate a backup
- * buffer for the resource.
+/*
+ * struct vmw_ctx_validation_info - Extra validation metadata for contexts
+ * @head: List head of context list
+ * @ctx: The context resource
+ * @cur: The context's persistent binding state
+ * @staged: The binding state changes of this command buffer
*/
-struct vmw_resource_val_node {
+struct vmw_ctx_validation_info {
struct list_head head;
- struct drm_hash_item hash;
- struct vmw_resource *res;
- struct vmw_buffer_object *new_backup;
- struct vmw_ctx_binding_state *staged_bindings;
- unsigned long new_backup_offset;
- u32 first_usage : 1;
- u32 switching_backup : 1;
- u32 no_buffer_needed : 1;
+ struct vmw_resource *ctx;
+ struct vmw_ctx_binding_state *cur;
+ struct vmw_ctx_binding_state *staged;
};
/**
@@ -127,10 +127,6 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAMobId *id,
struct vmw_buffer_object **vmw_bo_p);
-static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
- struct vmw_buffer_object *vbo,
- bool validate_as_mob,
- uint32_t *p_val_node);
/**
* vmw_ptr_diff - Compute the offset from a to b in bytes
*
@@ -145,48 +141,38 @@ static size_t vmw_ptr_diff(void *a, void *b)
}
/**
- * vmw_resources_unreserve - unreserve resources previously reserved for
- * command submission.
- *
- * @sw_context: pointer to the software context
- * @backoff: Whether command submission failed.
+ * vmw_execbuf_bindings_commit - Commit modified binding state
+ * @sw_context: The command submission context
+ * @backoff: Whether this is part of the error path and binding state
+ * changes should be ignored
*/
-static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
- bool backoff)
+static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
+ bool backoff)
{
- struct vmw_resource_val_node *val;
- struct list_head *list = &sw_context->resource_list;
+ struct vmw_ctx_validation_info *entry;
- if (sw_context->dx_query_mob && !backoff)
- vmw_context_bind_dx_query(sw_context->dx_query_ctx,
- sw_context->dx_query_mob);
+ list_for_each_entry(entry, &sw_context->ctx_list, head) {
+ if (!backoff)
+ vmw_binding_state_commit(entry->cur, entry->staged);
+ if (entry->staged != sw_context->staged_bindings)
+ vmw_binding_state_free(entry->staged);
+ else
+ sw_context->staged_bindings_inuse = false;
+ }
- list_for_each_entry(val, list, head) {
- struct vmw_resource *res = val->res;
- bool switch_backup =
- (backoff) ? false : val->switching_backup;
-
- /*
- * Transfer staged context bindings to the
- * persistent context binding tracker.
- */
- if (unlikely(val->staged_bindings)) {
- if (!backoff) {
- vmw_binding_state_commit
- (vmw_context_binding_state(val->res),
- val->staged_bindings);
- }
+ /* List entries are freed with the validation context */
+ INIT_LIST_HEAD(&sw_context->ctx_list);
+}
- if (val->staged_bindings != sw_context->staged_bindings)
- vmw_binding_state_free(val->staged_bindings);
- else
- sw_context->staged_bindings_inuse = false;
- val->staged_bindings = NULL;
- }
- vmw_resource_unreserve(res, switch_backup, val->new_backup,
- val->new_backup_offset);
- vmw_bo_unreference(&val->new_backup);
- }
+/**
+ * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
+ * @sw_context: The command submission context
+ */
+static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
+{
+ if (sw_context->dx_query_mob)
+ vmw_context_bind_dx_query(sw_context->dx_query_ctx,
+ sw_context->dx_query_mob);
}
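/*
 * A minimal, self-contained model of the commit-or-discard step performed by
 * vmw_execbuf_bindings_commit() above. This is not vmwgfx code; every name
 * below is invented for illustration, and the "binding state" is reduced to a
 * plain array of slots.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NUM_SLOTS 4

struct binding_state {                /* stands in for vmw_ctx_binding_state */
	int slot[NUM_SLOTS];          /* -1 means "slot not bound" */
};

struct ctx_entry {                    /* stands in for vmw_ctx_validation_info */
	struct binding_state cur;     /* persistent, context-owned state */
	struct binding_state staged;  /* changes made by this command buffer */
};

/* On success the staged changes become the context's persistent state;
 * on backoff (the error path) they are simply discarded. */
static void bindings_commit(struct ctx_entry *e, bool backoff)
{
	if (!backoff)
		memcpy(&e->cur, &e->staged, sizeof(e->cur));
}

int main(void)
{
	struct ctx_entry e = {
		.cur    = { .slot = { -1, -1, -1, -1 } },
		.staged = { .slot = {  7, -1, -1,  3 } },
	};

	bindings_commit(&e, false);
	printf("slot0 after commit: %d\n", e.cur.slot[0]);	/* 7 */
	return 0;
}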
/**
@@ -194,16 +180,17 @@ static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
* added to the validate list.
*
* @dev_priv: Pointer to the device private:
- * @sw_context: The validation context:
- * @node: The validation node holding this context.
+ * @sw_context: The command submission context
+ * @res: The context resource
+ * @node: The validation node holding the context resource metadata
*/
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
- struct vmw_resource_val_node *node)
+ struct vmw_resource *res,
+ struct vmw_ctx_validation_info *node)
{
int ret;
- ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
+ ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
if (unlikely(ret != 0))
goto out_err;
@@ -220,91 +207,138 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
}
if (sw_context->staged_bindings_inuse) {
- node->staged_bindings = vmw_binding_state_alloc(dev_priv);
- if (IS_ERR(node->staged_bindings)) {
+ node->staged = vmw_binding_state_alloc(dev_priv);
+ if (IS_ERR(node->staged)) {
DRM_ERROR("Failed to allocate context binding "
"information.\n");
- ret = PTR_ERR(node->staged_bindings);
- node->staged_bindings = NULL;
+ ret = PTR_ERR(node->staged);
+ node->staged = NULL;
goto out_err;
}
} else {
- node->staged_bindings = sw_context->staged_bindings;
+ node->staged = sw_context->staged_bindings;
sw_context->staged_bindings_inuse = true;
}
+ node->ctx = res;
+ node->cur = vmw_context_binding_state(res);
+ list_add_tail(&node->head, &sw_context->ctx_list);
+
return 0;
out_err:
return ret;
}
/**
- * vmw_resource_val_add - Add a resource to the software context's
- * resource list if it's not already on it.
+ * vmw_execbuf_res_size - calculate extra size for the resource validation
+ * node
+ * @dev_priv: Pointer to the device private struct.
+ * @res_type: The resource type.
*
- * @sw_context: Pointer to the software context.
+ * Guest-backed contexts and DX contexts require extra size to store
+ * execbuf private information in the validation node, typically the
+ * binding manager's associated data structures.
+ *
+ * Returns: The extra size requirement based on resource type.
+ */
+static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
+ enum vmw_res_type res_type)
+{
+ return (res_type == vmw_res_dx_context ||
+ (res_type == vmw_res_context && dev_priv->has_mob)) ?
+ sizeof(struct vmw_ctx_validation_info) : 0;
+}
+
+/**
+ * vmw_execbuf_rcache_update - Update a resource-node cache entry
+ *
+ * @rcache: Pointer to the entry to update.
* @res: Pointer to the resource.
- * @p_node On successful return points to a valid pointer to a
- * struct vmw_resource_val_node, if non-NULL on entry.
+ * @private: Pointer to the execbuf-private space in the resource
+ * validation node.
+ */
+static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
+ struct vmw_resource *res,
+ void *private)
+{
+ rcache->res = res;
+ rcache->private = private;
+ rcache->valid = 1;
+ rcache->valid_handle = 0;
+}
+
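/*
 * The cache kept up to date by vmw_execbuf_rcache_update() holds one entry
 * per resource type, so back-to-back commands that reference the same
 * resource can skip the lookup and list-add path entirely. A self-contained
 * sketch of the idea; the struct and helpers below are simplified stand-ins,
 * not the driver's actual definitions.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct res_cache_entry {
	const void *res;       /* last resource of this type seen */
	void *private;         /* its execbuf-private validation data */
	unsigned int handle;   /* user-space handle, filled in lazily */
	bool valid;
	bool valid_handle;
};

static void rcache_update(struct res_cache_entry *rc, const void *res,
			  void *private)
{
	rc->res = res;
	rc->private = private;
	rc->valid = true;
	rc->valid_handle = false;	/* the handle is only cached later */
}

static bool rcache_hit(const struct res_cache_entry *rc, const void *res)
{
	return rc->valid && rc->res == res;
}

int main(void)
{
	struct res_cache_entry rc = { 0 };
	int dummy_res;

	rcache_update(&rc, &dummy_res, NULL);
	printf("cache hit: %d\n", rcache_hit(&rc, &dummy_res));	/* 1 */
	return 0;
}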
+/**
+ * vmw_execbuf_res_noref_val_add - Add a resource described by an
+ * unreferenced rcu-protected pointer to the validation list.
+ * @sw_context: Pointer to the software context.
+ * @res: Unreferenced rcu-protected pointer to the resource.
+ *
+ * Returns: 0 on success. Negative error code on failure. Typical error
+ * codes are %-EINVAL on inconsistency and %-ESRCH if the resource was
+ * doomed.
*/
-static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
- struct vmw_resource *res,
- struct vmw_resource_val_node **p_node)
+static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
+ struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
- struct vmw_resource_val_node *node;
- struct drm_hash_item *hash;
int ret;
-
- if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
- &hash) == 0)) {
- node = container_of(hash, struct vmw_resource_val_node, hash);
- node->first_usage = false;
- if (unlikely(p_node != NULL))
- *p_node = node;
+ enum vmw_res_type res_type = vmw_res_type(res);
+ struct vmw_res_cache_entry *rcache;
+ struct vmw_ctx_validation_info *ctx_info;
+ bool first_usage;
+ unsigned int priv_size;
+
+ rcache = &sw_context->res_cache[res_type];
+ if (likely(rcache->valid && rcache->res == res)) {
+ vmw_user_resource_noref_release();
return 0;
}
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (unlikely(!node)) {
- DRM_ERROR("Failed to allocate a resource validation "
- "entry.\n");
- return -ENOMEM;
- }
-
- node->hash.key = (unsigned long) res;
- ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to initialize a resource validation "
- "entry.\n");
- kfree(node);
+ priv_size = vmw_execbuf_res_size(dev_priv, res_type);
+ ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
+ (void **)&ctx_info, &first_usage);
+ vmw_user_resource_noref_release();
+ if (ret)
return ret;
+
+ if (priv_size && first_usage) {
+ ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
+ ctx_info);
+ if (ret)
+ return ret;
}
- node->res = vmw_resource_reference(res);
- node->first_usage = true;
- if (unlikely(p_node != NULL))
- *p_node = node;
- if (!dev_priv->has_mob) {
- list_add_tail(&node->head, &sw_context->resource_list);
+ vmw_execbuf_rcache_update(rcache, res, ctx_info);
+ return 0;
+}
+
+/**
+ * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
+ * validation list if it's not already on it
+ * @sw_context: Pointer to the software context.
+ * @res: Pointer to the resource.
+ *
+ * Returns: Zero on success. Negative error code on failure.
+ */
+static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
+ struct vmw_resource *res)
+{
+ struct vmw_res_cache_entry *rcache;
+ enum vmw_res_type res_type = vmw_res_type(res);
+ void *ptr;
+ int ret;
+
+ rcache = &sw_context->res_cache[res_type];
+ if (likely(rcache->valid && rcache->res == res))
return 0;
- }
- switch (vmw_res_type(res)) {
- case vmw_res_context:
- case vmw_res_dx_context:
- list_add(&node->head, &sw_context->ctx_resource_list);
- ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
- break;
- case vmw_res_cotable:
- list_add_tail(&node->head, &sw_context->ctx_resource_list);
- break;
- default:
- list_add_tail(&node->head, &sw_context->resource_list);
- break;
- }
+ ret = vmw_validation_add_resource(sw_context->ctx, res, 0, &ptr, NULL);
+ if (ret)
+ return ret;
- return ret;
+ vmw_execbuf_rcache_update(rcache, res, ptr);
+
+ return 0;
}
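/*
 * Both helpers above rely on the new validation API being able to attach
 * caller-private data to an entry and to report whether a resource is seen
 * for the first time. A toy, userspace-only model of such a list follows;
 * the API is hypothetical and much simpler than vmw_validation_add_resource().
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct val_entry {
	const void *res;
	struct val_entry *next;
	/* priv_size bytes of caller data follow the header */
};

struct val_list {
	struct val_entry *head;
};

static int val_add(struct val_list *l, const void *res, size_t priv_size,
		   void **priv, bool *first_usage)
{
	struct val_entry *e;

	for (e = l->head; e; e = e->next) {
		if (e->res == res) {		/* already on the list */
			*priv = e + 1;
			*first_usage = false;
			return 0;
		}
	}

	e = calloc(1, sizeof(*e) + priv_size);
	if (!e)
		return -1;			/* stands in for -ENOMEM */

	e->res = res;
	e->next = l->head;
	l->head = e;
	*priv = e + 1;
	*first_usage = true;
	return 0;
}

int main(void)
{
	struct val_list l = { NULL };
	void *priv;
	bool first;
	int res;

	val_add(&l, &res, 16, &priv, &first);
	printf("first usage: %d\n", first);	/* 1 */
	val_add(&l, &res, 16, &priv, &first);
	printf("first usage: %d\n", first);	/* 0 */
	return 0;
}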
/**
@@ -325,11 +359,11 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
* First add the resource the view is pointing to, otherwise
* it may be swapped out when the view is validated.
*/
- ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view));
if (ret)
return ret;
- return vmw_resource_val_add(sw_context, view, NULL);
+ return vmw_execbuf_res_noctx_val_add(sw_context, view);
}
/**
@@ -342,28 +376,33 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
*
* The view is represented by a view id and the DX context it's created on,
* or scheduled for creation on. If there is no DX context set, the function
- * will return -EINVAL. Otherwise returns 0 on success and -EINVAL on failure.
+ * will return an -EINVAL error pointer.
+ *
+ * Returns: Unreferenced pointer to the resource on success, negative error
+ * pointer on failure.
*/
-static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
- enum vmw_view_type view_type, u32 id)
+static struct vmw_resource *
+vmw_view_id_val_add(struct vmw_sw_context *sw_context,
+ enum vmw_view_type view_type, u32 id)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *view;
int ret;
if (!ctx_node) {
DRM_ERROR("DX Context not set.\n");
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
view = vmw_view_lookup(sw_context->man, view_type, id);
if (IS_ERR(view))
- return PTR_ERR(view);
+ return view;
ret = vmw_view_res_val_add(sw_context, view);
- vmw_resource_unreference(&view);
+ if (ret)
+ return ERR_PTR(ret);
- return ret;
+ return view;
}
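/*
 * vmw_view_id_val_add() now returns the view itself, using the kernel's
 * error-pointer convention instead of a separate int. For readers unfamiliar
 * with that convention, here is a userspace re-implementation; the encoding
 * (errors live in the top MAX_ERRNO addresses) is the usual assumption, not
 * vmwgfx-specific code.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *err_ptr(long error)	{ return (void *)(intptr_t)error; }
static long ptr_err(const void *ptr)	{ return (long)(intptr_t)ptr; }
static int is_err(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static void *lookup(int ok)
{
	static int resource;

	return ok ? (void *)&resource : err_ptr(-22);	/* -22 == -EINVAL */
}

int main(void)
{
	void *p = lookup(0);

	if (is_err(p))
		printf("error: %ld\n", ptr_err(p));	/* error: -22 */
	return 0;
}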
/**
@@ -394,8 +433,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
if (IS_ERR(res))
continue;
- ret = vmw_resource_val_add(sw_context, res, NULL);
- vmw_resource_unreference(&res);
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
if (unlikely(ret != 0))
return ret;
}
@@ -407,17 +445,11 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
binding_list = vmw_context_binding_list(ctx);
list_for_each_entry(entry, binding_list, ctx_list) {
- /* entry->res is not refcounted */
- res = vmw_resource_reference_unless_doomed(entry->res);
- if (unlikely(res == NULL))
- continue;
-
if (vmw_res_type(entry->res) == vmw_res_view)
ret = vmw_view_res_val_add(sw_context, entry->res);
else
- ret = vmw_resource_val_add(sw_context, entry->res,
- NULL);
- vmw_resource_unreference(&res);
+ ret = vmw_execbuf_res_noctx_val_add(sw_context,
+ entry->res);
if (unlikely(ret != 0))
break;
}
@@ -427,9 +459,8 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
dx_query_mob = vmw_context_get_dx_query_mob(ctx);
if (dx_query_mob)
- ret = vmw_bo_to_validate_list(sw_context,
- dx_query_mob,
- true, NULL);
+ ret = vmw_validation_add_bo(sw_context->ctx,
+ dx_query_mob, true, false);
}
mutex_unlock(&dev_priv->binding_mutex);
@@ -445,7 +476,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
* id that needs fixup is located. Granularity is one byte.
* @rel_type: Relocation type.
*/
-static int vmw_resource_relocation_add(struct list_head *list,
+static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
const struct vmw_resource *res,
unsigned long offset,
enum vmw_resource_relocation_type
@@ -453,7 +484,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
{
struct vmw_resource_relocation *rel;
- rel = kmalloc(sizeof(*rel), GFP_KERNEL);
+ rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
if (unlikely(!rel)) {
DRM_ERROR("Failed to allocate a resource relocation.\n");
return -ENOMEM;
@@ -462,7 +493,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
rel->res = res;
rel->offset = offset;
rel->rel_type = rel_type;
- list_add_tail(&rel->head, list);
+ list_add_tail(&rel->head, &sw_context->res_relocations);
return 0;
}
@@ -470,16 +501,13 @@ static int vmw_resource_relocation_add(struct list_head *list,
/**
* vmw_resource_relocations_free - Free all relocations on a list
*
- * @list: Pointer to the head of the relocation list.
+ * @list: Pointer to the head of the relocation list
*/
static void vmw_resource_relocations_free(struct list_head *list)
{
- struct vmw_resource_relocation *rel, *n;
+ /* Memory is validation context memory, so no need to free it */
- list_for_each_entry_safe(rel, n, list, head) {
- list_del(&rel->head);
- kfree(rel);
- }
+ INIT_LIST_HEAD(list);
}
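/*
 * Relocation entries are now carved out of the validation context's own
 * memory (vmw_validation_mem_alloc() above), so "freeing" the list only has
 * to reset the list head; the backing memory is released when the whole
 * context is torn down. A rough arena-allocator sketch of the same idea,
 * with invented names:
 */
#include <stddef.h>
#include <stdio.h>

struct arena {
	char buf[4096];
	size_t used;
};

static void *arena_alloc(struct arena *a, size_t n)
{
	void *p;

	n = (n + 7) & ~(size_t)7;		/* keep allocations aligned */
	if (a->used + n > sizeof(a->buf))
		return NULL;
	p = a->buf + a->used;
	a->used += n;
	return p;
}

/* Individual entries are never freed; dropping the arena releases them all. */
static void arena_reset(struct arena *a)
{
	a->used = 0;
}

int main(void)
{
	struct arena a = { .used = 0 };
	void *rel = arena_alloc(&a, 24);

	printf("allocated %p, used %zu\n", rel, a.used);
	arena_reset(&a);
	printf("used after reset: %zu\n", a.used);
	return 0;
}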
/**
@@ -532,68 +560,6 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
}
/**
- * vmw_bo_to_validate_list - add a bo to a validate list
- *
- * @sw_context: The software context used for this command submission batch.
- * @bo: The buffer object to add.
- * @validate_as_mob: Validate this buffer as a MOB.
- * @p_val_node: If non-NULL Will be updated with the validate node number
- * on return.
- *
- * Returns -EINVAL if the limit of number of buffer objects per command
- * submission is reached.
- */
-static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
- struct vmw_buffer_object *vbo,
- bool validate_as_mob,
- uint32_t *p_val_node)
-{
- uint32_t val_node;
- struct vmw_validate_buffer *vval_buf;
- struct ttm_validate_buffer *val_buf;
- struct drm_hash_item *hash;
- int ret;
-
- if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
- &hash) == 0)) {
- vval_buf = container_of(hash, struct vmw_validate_buffer,
- hash);
- if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
- DRM_ERROR("Inconsistent buffer usage.\n");
- return -EINVAL;
- }
- val_buf = &vval_buf->base;
- val_node = vval_buf - sw_context->val_bufs;
- } else {
- val_node = sw_context->cur_val_buf;
- if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
- DRM_ERROR("Max number of DMA buffers per submission "
- "exceeded.\n");
- return -EINVAL;
- }
- vval_buf = &sw_context->val_bufs[val_node];
- vval_buf->hash.key = (unsigned long) vbo;
- ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to initialize a buffer validation "
- "entry.\n");
- return ret;
- }
- ++sw_context->cur_val_buf;
- val_buf = &vval_buf->base;
- val_buf->bo = ttm_bo_reference(&vbo->base);
- val_buf->shared = false;
- list_add_tail(&val_buf->head, &sw_context->validate_nodes);
- vval_buf->validate_as_mob = validate_as_mob;
- }
-
- if (p_val_node)
- *p_val_node = val_node;
-
- return 0;
-}
-
-/**
* vmw_resources_reserve - Reserve all resources on the sw_context's
* resource list.
*
@@ -605,27 +571,11 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
*/
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
- struct vmw_resource_val_node *val;
- int ret = 0;
-
- list_for_each_entry(val, &sw_context->resource_list, head) {
- struct vmw_resource *res = val->res;
-
- ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
- if (unlikely(ret != 0))
- return ret;
-
- if (res->backup) {
- struct vmw_buffer_object *vbo = res->backup;
-
- ret = vmw_bo_to_validate_list
- (sw_context, vbo,
- vmw_resource_needs_backup(res), NULL);
+ int ret;
- if (unlikely(ret != 0))
- return ret;
- }
- }
+ ret = vmw_validation_res_reserve(sw_context->ctx, true);
+ if (ret)
+ return ret;
if (sw_context->dx_query_mob) {
struct vmw_buffer_object *expected_dx_query_mob;
@@ -642,87 +592,6 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
}
/**
- * vmw_resources_validate - Validate all resources on the sw_context's
- * resource list.
- *
- * @sw_context: Pointer to the software context.
- *
- * Before this function is called, all resource backup buffers must have
- * been validated.
- */
-static int vmw_resources_validate(struct vmw_sw_context *sw_context)
-{
- struct vmw_resource_val_node *val;
- int ret;
-
- list_for_each_entry(val, &sw_context->resource_list, head) {
- struct vmw_resource *res = val->res;
- struct vmw_buffer_object *backup = res->backup;
-
- ret = vmw_resource_validate(res);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Failed to validate resource.\n");
- return ret;
- }
-
- /* Check if the resource switched backup buffer */
- if (backup && res->backup && (backup != res->backup)) {
- struct vmw_buffer_object *vbo = res->backup;
-
- ret = vmw_bo_to_validate_list
- (sw_context, vbo,
- vmw_resource_needs_backup(res), NULL);
- if (ret) {
- ttm_bo_unreserve(&vbo->base);
- return ret;
- }
- }
- }
- return 0;
-}
-
-/**
- * vmw_cmd_res_reloc_add - Add a resource to a software context's
- * relocation- and validation lists.
- *
- * @dev_priv: Pointer to a struct vmw_private identifying the device.
- * @sw_context: Pointer to the software context.
- * @id_loc: Pointer to where the id that needs translation is located.
- * @res: Valid pointer to a struct vmw_resource.
- * @p_val: If non null, a pointer to the struct vmw_resource_validate_node
- * used for this resource is returned here.
- */
-static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- uint32_t *id_loc,
- struct vmw_resource *res,
- struct vmw_resource_val_node **p_val)
-{
- int ret;
- struct vmw_resource_val_node *node;
-
- *p_val = NULL;
- ret = vmw_resource_relocation_add(&sw_context->res_relocations,
- res,
- vmw_ptr_diff(sw_context->buf_start,
- id_loc),
- vmw_res_rel_normal);
- if (unlikely(ret != 0))
- return ret;
-
- ret = vmw_resource_val_add(sw_context, res, &node);
- if (unlikely(ret != 0))
- return ret;
-
- if (p_val)
- *p_val = node;
-
- return 0;
-}
-
-
-/**
* vmw_cmd_res_check - Check that a resource is present and if so, put it
* on the resource validate list unless it's already there.
*
@@ -741,17 +610,16 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
enum vmw_res_type res_type,
const struct vmw_user_resource_conv *converter,
uint32_t *id_loc,
- struct vmw_resource_val_node **p_val)
+ struct vmw_resource **p_res)
{
- struct vmw_res_cache_entry *rcache =
- &sw_context->res_cache[res_type];
+ struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
struct vmw_resource *res;
- struct vmw_resource_val_node *node;
int ret;
+ if (p_res)
+ *p_res = NULL;
+
if (*id_loc == SVGA3D_INVALID_ID) {
- if (p_val)
- *p_val = NULL;
if (res_type == vmw_res_context) {
DRM_ERROR("Illegal context invalid id.\n");
return -EINVAL;
@@ -759,56 +627,41 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
return 0;
}
- /*
- * Fastpath in case of repeated commands referencing the same
- * resource
- */
+ if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
+ res = rcache->res;
+ } else {
+ unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);
- if (likely(rcache->valid && *id_loc == rcache->handle)) {
- const struct vmw_resource *res = rcache->res;
+ ret = vmw_validation_preload_res(sw_context->ctx, size);
+ if (ret)
+ return ret;
- rcache->node->first_usage = false;
- if (p_val)
- *p_val = rcache->node;
+ res = vmw_user_resource_noref_lookup_handle
+ (dev_priv, sw_context->fp->tfile, *id_loc, converter);
+ if (unlikely(IS_ERR(res))) {
+ DRM_ERROR("Could not find or use resource 0x%08x.\n",
+ (unsigned int) *id_loc);
+ return PTR_ERR(res);
+ }
- return vmw_resource_relocation_add
- (&sw_context->res_relocations, res,
- vmw_ptr_diff(sw_context->buf_start, id_loc),
- vmw_res_rel_normal);
- }
+ ret = vmw_execbuf_res_noref_val_add(sw_context, res);
+ if (unlikely(ret != 0))
+ return ret;
- ret = vmw_user_resource_lookup_handle(dev_priv,
- sw_context->fp->tfile,
- *id_loc,
- converter,
- &res);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could not find or use resource 0x%08x.\n",
- (unsigned) *id_loc);
- dump_stack();
- return ret;
+ if (rcache->valid && rcache->res == res) {
+ rcache->valid_handle = true;
+ rcache->handle = *id_loc;
+ }
}
- rcache->valid = true;
- rcache->res = res;
- rcache->handle = *id_loc;
-
- ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
- res, &node);
- if (unlikely(ret != 0))
- goto out_no_reloc;
+ ret = vmw_resource_relocation_add(sw_context, res,
+ vmw_ptr_diff(sw_context->buf_start,
+ id_loc),
+ vmw_res_rel_normal);
+ if (p_res)
+ *p_res = res;
- rcache->node = node;
- if (p_val)
- *p_val = node;
- vmw_resource_unreference(&res);
return 0;
-
-out_no_reloc:
- BUG_ON(sw_context->error_resource != NULL);
- sw_context->error_resource = res;
-
- return ret;
}
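/*
 * The rewritten lookup path borrows the resource without taking a reference
 * (the "noref" lookup) and immediately parks it on the validation list, which
 * takes the one long-lived reference before the borrow window closes. A
 * simplified model of that hand-over; all names and the locking model are
 * invented for illustration.
 */
#include <stdio.h>

struct resource {
	int refcount;
};

static struct resource table_entry = { .refcount = 1 };	/* owned by a table */

static struct resource *noref_lookup(unsigned int handle)
{
	(void)handle;
	return &table_entry;		/* borrowed: no reference taken */
}

static void noref_release(void)
{
	/* end of the borrow window; the pointer may go away after this */
}

static void validation_list_add(struct resource *res)
{
	res->refcount++;	/* the validation list now owns a reference */
}

int main(void)
{
	struct resource *res = noref_lookup(42);

	validation_list_add(res);	/* must happen before the release below */
	noref_release();
	printf("refcount: %d\n", res->refcount);	/* 2 */
	return 0;
}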
/**
@@ -861,22 +714,18 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
*/
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
- struct vmw_resource_val_node *val;
+ struct vmw_ctx_validation_info *val;
int ret;
- list_for_each_entry(val, &sw_context->resource_list, head) {
- if (unlikely(!val->staged_bindings))
- break;
-
- ret = vmw_binding_rebind_all
- (vmw_context_binding_state(val->res));
+ list_for_each_entry(val, &sw_context->ctx_list, head) {
+ ret = vmw_binding_rebind_all(val->cur);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to rebind context.\n");
return ret;
}
- ret = vmw_rebind_all_dx_query(val->res);
+ ret = vmw_rebind_all_dx_query(val->ctx);
if (ret != 0)
return ret;
}
@@ -903,45 +752,33 @@ static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
uint32 view_ids[], u32 num_views,
u32 first_slot)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
- struct vmw_cmdbuf_res_manager *man;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
u32 i;
- int ret;
if (!ctx_node) {
DRM_ERROR("DX Context not set.\n");
return -EINVAL;
}
- man = sw_context->man;
for (i = 0; i < num_views; ++i) {
struct vmw_ctx_bindinfo_view binding;
struct vmw_resource *view = NULL;
if (view_ids[i] != SVGA3D_INVALID_ID) {
- view = vmw_view_lookup(man, view_type, view_ids[i]);
+ view = vmw_view_id_val_add(sw_context, view_type,
+ view_ids[i]);
if (IS_ERR(view)) {
DRM_ERROR("View not found.\n");
return PTR_ERR(view);
}
-
- ret = vmw_view_res_val_add(sw_context, view);
- if (ret) {
- DRM_ERROR("Could not add view to "
- "validation list.\n");
- vmw_resource_unreference(&view);
- return ret;
- }
}
- binding.bi.ctx = ctx_node->res;
+ binding.bi.ctx = ctx_node->ctx;
binding.bi.res = view;
binding.bi.bt = binding_type;
binding.shader_slot = shader_slot;
binding.slot = first_slot + i;
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ vmw_binding_add(ctx_node->staged, &binding.bi,
shader_slot, binding.slot);
- if (view)
- vmw_resource_unreference(&view);
}
return 0;
@@ -971,6 +808,34 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
user_context_converter, &cmd->cid, NULL);
}
+/**
+ * vmw_execbuf_info_from_res - Get the private validation metadata for a
+ * recently validated resource
+ * @sw_context: Pointer to the command submission context
+ * @res: The resource
+ *
+ * The resource pointed to by @res needs to be present in the command submission
+ * context's resource cache, and hence must be the last resource of that type
+ * processed by the validation code.
+ *
+ * Return: a pointer to the private metadata of the resource, or NULL
+ * if it wasn't found
+ */
+static struct vmw_ctx_validation_info *
+vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
+ struct vmw_resource *res)
+{
+ struct vmw_res_cache_entry *rcache =
+ &sw_context->res_cache[vmw_res_type(res)];
+
+ if (rcache->valid && rcache->res == res)
+ return rcache->private;
+
+ WARN_ON_ONCE(true);
+ return NULL;
+}
+
+
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
@@ -979,8 +844,8 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
SVGA3dCmdHeader header;
SVGA3dCmdSetRenderTarget body;
} *cmd;
- struct vmw_resource_val_node *ctx_node;
- struct vmw_resource_val_node *res_node;
+ struct vmw_resource *ctx;
+ struct vmw_resource *res;
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
@@ -993,25 +858,29 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
- &ctx_node);
+ &ctx);
if (unlikely(ret != 0))
return ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.target.sid, &res_node);
- if (unlikely(ret != 0))
+ user_surface_converter, &cmd->body.target.sid,
+ &res);
+ if (unlikely(ret))
return ret;
if (dev_priv->has_mob) {
struct vmw_ctx_bindinfo_view binding;
+ struct vmw_ctx_validation_info *node;
- binding.bi.ctx = ctx_node->res;
- binding.bi.res = res_node ? res_node->res : NULL;
+ node = vmw_execbuf_info_from_res(sw_context, ctx);
+ if (!node)
+ return -EINVAL;
+
+ binding.bi.ctx = ctx;
+ binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_rt;
binding.slot = cmd->body.type;
- vmw_binding_add(ctx_node->staged_bindings,
- &binding.bi, 0, binding.slot);
+ vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
}
return 0;
@@ -1030,8 +899,8 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
cmd = container_of(header, struct vmw_sid_cmd, header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.src.sid, NULL);
+ user_surface_converter,
+ &cmd->body.src.sid, NULL);
if (ret)
return ret;
@@ -1171,17 +1040,17 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
if (unlikely(sw_context->cur_query_bo != NULL)) {
sw_context->needs_post_query_barrier = true;
- ret = vmw_bo_to_validate_list(sw_context,
- sw_context->cur_query_bo,
- dev_priv->has_mob, NULL);
+ ret = vmw_validation_add_bo(sw_context->ctx,
+ sw_context->cur_query_bo,
+ dev_priv->has_mob, false);
if (unlikely(ret != 0))
return ret;
}
sw_context->cur_query_bo = new_query_bo;
- ret = vmw_bo_to_validate_list(sw_context,
- dev_priv->dummy_query_bo,
- dev_priv->has_mob, NULL);
+ ret = vmw_validation_add_bo(sw_context->ctx,
+ dev_priv->dummy_query_bo,
+ dev_priv->has_mob, false);
if (unlikely(ret != 0))
return ret;
@@ -1269,7 +1138,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
* @sw_context: The software context used for this command batch validation.
* @id: Pointer to the user-space handle to be translated.
* @vmw_bo_p: Points to a location that, on successful return will carry
- * a reference-counted pointer to the DMA buffer identified by the
+ * a non-reference-counted pointer to the buffer object identified by the
* user-space handle in @id.
*
* This function saves information needed to translate a user-space buffer
@@ -1284,40 +1153,34 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
SVGAMobId *id,
struct vmw_buffer_object **vmw_bo_p)
{
- struct vmw_buffer_object *vmw_bo = NULL;
+ struct vmw_buffer_object *vmw_bo;
uint32_t handle = *id;
struct vmw_relocation *reloc;
int ret;
- ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
- if (unlikely(ret != 0)) {
+ vmw_validation_preload_bo(sw_context->ctx);
+ vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
+ if (IS_ERR(vmw_bo)) {
DRM_ERROR("Could not find or use MOB buffer.\n");
- ret = -EINVAL;
- goto out_no_reloc;
+ return PTR_ERR(vmw_bo);
}
- if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
- DRM_ERROR("Max number relocations per submission"
- " exceeded\n");
- ret = -EINVAL;
- goto out_no_reloc;
- }
+ ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
+ vmw_user_bo_noref_release();
+ if (unlikely(ret != 0))
+ return ret;
- reloc = &sw_context->relocs[sw_context->cur_reloc++];
- reloc->mob_loc = id;
- reloc->location = NULL;
+ reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
+ if (!reloc)
+ return -ENOMEM;
- ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
- if (unlikely(ret != 0))
- goto out_no_reloc;
+ reloc->mob_loc = id;
+ reloc->vbo = vmw_bo;
*vmw_bo_p = vmw_bo;
- return 0;
+ list_add_tail(&reloc->head, &sw_context->bo_relocations);
-out_no_reloc:
- vmw_bo_unreference(&vmw_bo);
- *vmw_bo_p = NULL;
- return ret;
+ return 0;
}
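/*
 * Instead of indexing a fixed per-submission relocation array, the function
 * now allocates a small record per reference and queues it for patching once
 * buffer placement is known. A stripped-down model of such a record; the
 * names and the single-linked list are invented, and the kernel code uses
 * list_head and validation-context memory instead of calloc().
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct buffer;				/* opaque here */

struct reloc {				/* stands in for struct vmw_relocation */
	uint32_t *mob_loc;		/* where the MOB id sits in the stream */
	struct buffer *vbo;		/* buffer the id will resolve to */
	struct reloc *next;
};

static struct reloc *reloc_head;

/* Remember "patch *id once the buffer's placement is known" and defer it. */
static int record_mob_reloc(uint32_t *id, struct buffer *vbo)
{
	struct reloc *r = calloc(1, sizeof(*r));

	if (!r)
		return -1;		/* stands in for -ENOMEM */
	r->mob_loc = id;
	r->vbo = vbo;
	r->next = reloc_head;
	reloc_head = r;
	return 0;
}

int main(void)
{
	uint32_t id_in_stream = 123;	/* user-space handle, not yet a MOB id */

	record_mob_reloc(&id_in_stream, NULL);
	printf("pending relocations: %s\n", reloc_head ? "yes" : "no");
	return 0;
}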
/**
@@ -1328,7 +1191,7 @@ out_no_reloc:
* @sw_context: The software context used for this command batch validation.
* @ptr: Pointer to the user-space handle to be translated.
* @vmw_bo_p: Points to a location that, on successful return will carry
- * a reference-counted pointer to the DMA buffer identified by the
+ * a non-reference-counted pointer to the DMA buffer identified by the
* user-space handle in @id.
*
* This function saves information needed to translate a user-space buffer
@@ -1344,39 +1207,33 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
SVGAGuestPtr *ptr,
struct vmw_buffer_object **vmw_bo_p)
{
- struct vmw_buffer_object *vmw_bo = NULL;
+ struct vmw_buffer_object *vmw_bo;
uint32_t handle = ptr->gmrId;
struct vmw_relocation *reloc;
int ret;
- ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
- if (unlikely(ret != 0)) {
+ vmw_validation_preload_bo(sw_context->ctx);
+ vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
+ if (IS_ERR(vmw_bo)) {
DRM_ERROR("Could not find or use GMR region.\n");
- ret = -EINVAL;
- goto out_no_reloc;
- }
-
- if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
- DRM_ERROR("Max number relocations per submission"
- " exceeded\n");
- ret = -EINVAL;
- goto out_no_reloc;
+ return PTR_ERR(vmw_bo);
}
- reloc = &sw_context->relocs[sw_context->cur_reloc++];
- reloc->location = ptr;
-
- ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
+ ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
+ vmw_user_bo_noref_release();
if (unlikely(ret != 0))
- goto out_no_reloc;
+ return ret;
+
+ reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
+ if (!reloc)
+ return -ENOMEM;
+ reloc->location = ptr;
+ reloc->vbo = vmw_bo;
*vmw_bo_p = vmw_bo;
- return 0;
+ list_add_tail(&reloc->head, &sw_context->bo_relocations);
-out_no_reloc:
- vmw_bo_unreference(&vmw_bo);
- *vmw_bo_p = NULL;
- return ret;
+ return 0;
}
@@ -1400,7 +1257,7 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
} *cmd;
int ret;
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *cotable_res;
@@ -1415,9 +1272,8 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
return -EINVAL;
- cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
+ cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
- vmw_resource_unreference(&cotable_res);
return ret;
}
@@ -1462,11 +1318,8 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
return ret;
sw_context->dx_query_mob = vmw_bo;
- sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
-
- vmw_bo_unreference(&vmw_bo);
-
- return ret;
+ sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
+ return 0;
}
@@ -1567,7 +1420,6 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
- vmw_bo_unreference(&vmw_bo);
return ret;
}
@@ -1621,7 +1473,6 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
- vmw_bo_unreference(&vmw_bo);
return ret;
}
@@ -1654,7 +1505,6 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- vmw_bo_unreference(&vmw_bo);
return 0;
}
@@ -1706,7 +1556,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- vmw_bo_unreference(&vmw_bo);
return 0;
}
@@ -1757,7 +1606,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
if (unlikely(ret != 0)) {
if (unlikely(ret != -ERESTARTSYS))
DRM_ERROR("could not find surface for DMA.\n");
- goto out_no_surface;
+ return ret;
}
srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
@@ -1765,9 +1614,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
header);
-out_no_surface:
- vmw_bo_unreference(&vmw_bo);
- return ret;
+ return 0;
}
static int vmw_cmd_draw(struct vmw_private *dev_priv,
@@ -1837,8 +1684,8 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
((unsigned long) header + header->size + sizeof(header));
SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
- struct vmw_resource_val_node *ctx_node;
- struct vmw_resource_val_node *res_node;
+ struct vmw_resource *ctx;
+ struct vmw_resource *res;
int ret;
cmd = container_of(header, struct vmw_tex_state_cmd,
@@ -1846,7 +1693,7 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->state.cid,
- &ctx_node);
+ &ctx);
if (unlikely(ret != 0))
return ret;
@@ -1862,19 +1709,24 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cur_state->value, &res_node);
+ &cur_state->value, &res);
if (unlikely(ret != 0))
return ret;
if (dev_priv->has_mob) {
struct vmw_ctx_bindinfo_tex binding;
+ struct vmw_ctx_validation_info *node;
+
+ node = vmw_execbuf_info_from_res(sw_context, ctx);
+ if (!node)
+ return -EINVAL;
- binding.bi.ctx = ctx_node->res;
- binding.bi.res = res_node ? res_node->res : NULL;
+ binding.bi.ctx = ctx;
+ binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_tex;
binding.texture_stage = cur_state->stage;
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
- 0, binding.texture_stage);
+ vmw_binding_add(node->staged, &binding.bi, 0,
+ binding.texture_stage);
}
}
@@ -1893,14 +1745,9 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
SVGAFifoCmdDefineGMRFB body;
} *cmd = buf;
- ret = vmw_translate_guest_ptr(dev_priv, sw_context,
- &cmd->body.ptr,
- &vmw_bo);
- if (unlikely(ret != 0))
- return ret;
-
- vmw_bo_unreference(&vmw_bo);
-
+ return vmw_translate_guest_ptr(dev_priv, sw_context,
+ &cmd->body.ptr,
+ &vmw_bo);
return ret;
}
@@ -1922,25 +1769,24 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
*/
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
- struct vmw_resource_val_node *val_node,
+ struct vmw_resource *res,
uint32_t *buf_id,
unsigned long backup_offset)
{
- struct vmw_buffer_object *dma_buf;
+ struct vmw_buffer_object *vbo;
+ void *info;
int ret;
- ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
+ info = vmw_execbuf_info_from_res(sw_context, res);
+ if (!info)
+ return -EINVAL;
+
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
if (ret)
return ret;
- val_node->switching_backup = true;
- if (val_node->first_usage)
- val_node->no_buffer_needed = true;
-
- vmw_bo_unreference(&val_node->new_backup);
- val_node->new_backup = dma_buf;
- val_node->new_backup_offset = backup_offset;
-
+ vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
+ backup_offset);
return 0;
}
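/*
 * The backup-buffer switch is no longer applied on the spot; it is recorded
 * in the resource's validation metadata and performed when the resource is
 * unreserved (vmw_validation_res_switch_backup() above). A toy model of that
 * deferral, with invented names and no locking:
 */
#include <stdbool.h>
#include <stdio.h>

struct backing {
	int id;
};

struct res_val_info {			/* per-resource validation metadata */
	struct backing *new_backup;
	unsigned long new_backup_offset;
	bool switch_backup;
};

struct resource_state {
	struct backing *backup;
	unsigned long backup_offset;
};

static void switch_backup_later(struct res_val_info *info,
				struct backing *vbo, unsigned long offset)
{
	info->new_backup = vbo;
	info->new_backup_offset = offset;
	info->switch_backup = true;
}

static void unreserve(struct resource_state *res, struct res_val_info *info)
{
	if (info->switch_backup) {
		res->backup = info->new_backup;
		res->backup_offset = info->new_backup_offset;
	}
}

int main(void)
{
	struct backing new_bo = { .id = 2 };
	struct res_val_info info = { 0 };
	struct resource_state res = { 0 };

	switch_backup_later(&info, &new_bo, 4096);
	unreserve(&res, &info);
	printf("backup %d at offset %lu\n", res.backup->id, res.backup_offset);
	return 0;
}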
@@ -1970,15 +1816,15 @@ static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
uint32_t *buf_id,
unsigned long backup_offset)
{
- struct vmw_resource_val_node *val_node;
+ struct vmw_resource *res;
int ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
- converter, res_id, &val_node);
+ converter, res_id, &res);
if (ret)
return ret;
- return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
+ return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
buf_id, backup_offset);
}
@@ -2170,14 +2016,14 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
} *cmd;
int ret;
size_t size;
- struct vmw_resource_val_node *val;
+ struct vmw_resource *ctx;
cmd = container_of(header, struct vmw_shader_define_cmd,
header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
- &val);
+ &ctx);
if (unlikely(ret != 0))
return ret;
@@ -2186,14 +2032,14 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
size = cmd->header.size - sizeof(cmd->body);
ret = vmw_compat_shader_add(dev_priv,
- vmw_context_res_man(val->res),
+ vmw_context_res_man(ctx),
cmd->body.shid, cmd + 1,
cmd->body.type, size,
&sw_context->staged_cmd_res);
if (unlikely(ret != 0))
return ret;
- return vmw_resource_relocation_add(&sw_context->res_relocations,
+ return vmw_resource_relocation_add(sw_context,
NULL,
vmw_ptr_diff(sw_context->buf_start,
&cmd->header.id),
@@ -2217,28 +2063,28 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
SVGA3dCmdDestroyShader body;
} *cmd;
int ret;
- struct vmw_resource_val_node *val;
+ struct vmw_resource *ctx;
cmd = container_of(header, struct vmw_shader_destroy_cmd,
header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
- &val);
+ &ctx);
if (unlikely(ret != 0))
return ret;
if (unlikely(!dev_priv->has_mob))
return 0;
- ret = vmw_shader_remove(vmw_context_res_man(val->res),
+ ret = vmw_shader_remove(vmw_context_res_man(ctx),
cmd->body.shid,
cmd->body.type,
&sw_context->staged_cmd_res);
if (unlikely(ret != 0))
return ret;
- return vmw_resource_relocation_add(&sw_context->res_relocations,
+ return vmw_resource_relocation_add(sw_context,
NULL,
vmw_ptr_diff(sw_context->buf_start,
&cmd->header.id),
@@ -2261,9 +2107,9 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
SVGA3dCmdHeader header;
SVGA3dCmdSetShader body;
} *cmd;
- struct vmw_resource_val_node *ctx_node, *res_node = NULL;
struct vmw_ctx_bindinfo_shader binding;
- struct vmw_resource *res = NULL;
+ struct vmw_resource *ctx, *res = NULL;
+ struct vmw_ctx_validation_info *ctx_info;
int ret;
cmd = container_of(header, struct vmw_set_shader_cmd,
@@ -2277,7 +2123,7 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
- &ctx_node);
+ &ctx);
if (unlikely(ret != 0))
return ret;
@@ -2285,34 +2131,35 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
return 0;
if (cmd->body.shid != SVGA3D_INVALID_ID) {
- res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+ res = vmw_shader_lookup(vmw_context_res_man(ctx),
cmd->body.shid,
cmd->body.type);
if (!IS_ERR(res)) {
- ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
- &cmd->body.shid, res,
- &res_node);
- vmw_resource_unreference(&res);
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
if (unlikely(ret != 0))
return ret;
}
}
- if (!res_node) {
+ if (IS_ERR_OR_NULL(res)) {
ret = vmw_cmd_res_check(dev_priv, sw_context,
vmw_res_shader,
user_shader_converter,
- &cmd->body.shid, &res_node);
+ &cmd->body.shid, &res);
if (unlikely(ret != 0))
return ret;
}
- binding.bi.ctx = ctx_node->res;
- binding.bi.res = res_node ? res_node->res : NULL;
+ ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
+ if (!ctx_info)
+ return -EINVAL;
+
+ binding.bi.ctx = ctx;
+ binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_shader;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ vmw_binding_add(ctx_info->staged, &binding.bi,
binding.shader_slot, 0);
return 0;
}
@@ -2393,8 +2240,8 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
SVGA3dCmdHeader header;
SVGA3dCmdDXSetSingleConstantBuffer body;
} *cmd;
- struct vmw_resource_val_node *res_node = NULL;
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *res = NULL;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_cb binding;
int ret;
@@ -2406,12 +2253,12 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cmd->body.sid, &res_node);
+ &cmd->body.sid, &res);
if (unlikely(ret != 0))
return ret;
- binding.bi.ctx = ctx_node->res;
- binding.bi.res = res_node ? res_node->res : NULL;
+ binding.bi.ctx = ctx_node->ctx;
+ binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_cb;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
binding.offset = cmd->body.offsetInBytes;
@@ -2426,7 +2273,7 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
return -EINVAL;
}
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ vmw_binding_add(ctx_node->staged, &binding.bi,
binding.shader_slot, binding.slot);
return 0;
@@ -2482,7 +2329,7 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
SVGA3dCmdDXSetShader body;
} *cmd;
struct vmw_resource *res = NULL;
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_shader binding;
int ret = 0;
@@ -2506,23 +2353,20 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
return PTR_ERR(res);
}
- ret = vmw_resource_val_add(sw_context, res, NULL);
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
if (ret)
- goto out_unref;
+ return ret;
}
- binding.bi.ctx = ctx_node->res;
+ binding.bi.ctx = ctx_node->ctx;
binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_dx_shader;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ vmw_binding_add(ctx_node->staged, &binding.bi,
binding.shader_slot, 0);
-out_unref:
- if (res)
- vmw_resource_unreference(&res);
- return ret;
+ return 0;
}
/**
@@ -2537,9 +2381,9 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_vb binding;
- struct vmw_resource_val_node *res_node;
+ struct vmw_resource *res;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetVertexBuffers body;
@@ -2564,18 +2408,18 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
for (i = 0; i < num; i++) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cmd->buf[i].sid, &res_node);
+ &cmd->buf[i].sid, &res);
if (unlikely(ret != 0))
return ret;
- binding.bi.ctx = ctx_node->res;
+ binding.bi.ctx = ctx_node->ctx;
binding.bi.bt = vmw_ctx_binding_vb;
- binding.bi.res = ((res_node) ? res_node->res : NULL);
+ binding.bi.res = res;
binding.offset = cmd->buf[i].offset;
binding.stride = cmd->buf[i].stride;
binding.slot = i + cmd->body.startBuffer;
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ vmw_binding_add(ctx_node->staged, &binding.bi,
0, binding.slot);
}
@@ -2594,9 +2438,9 @@ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_ib binding;
- struct vmw_resource_val_node *res_node;
+ struct vmw_resource *res;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetIndexBuffer body;
@@ -2611,17 +2455,17 @@ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cmd->body.sid, &res_node);
+ &cmd->body.sid, &res);
if (unlikely(ret != 0))
return ret;
- binding.bi.ctx = ctx_node->res;
- binding.bi.res = ((res_node) ? res_node->res : NULL);
+ binding.bi.ctx = ctx_node->ctx;
+ binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_ib;
binding.offset = cmd->body.offset;
binding.format = cmd->body.format;
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
+ vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
return 0;
}
@@ -2679,8 +2523,8 @@ static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
SVGA3dCmdDXClearRenderTargetView body;
} *cmd = container_of(header, typeof(*cmd), header);
- return vmw_view_id_val_add(sw_context, vmw_view_rt,
- cmd->body.renderTargetViewId);
+ return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_rt,
+ cmd->body.renderTargetViewId));
}
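/*
 * PTR_RET() just folds the error-pointer return of vmw_view_id_val_add() back
 * into an int: 0 for a valid pointer, the negative errno otherwise (mainline
 * later favoured the name PTR_ERR_OR_ZERO() for this, if memory serves). A
 * userspace equivalent, with the usual MAX_ERRNO encoding assumed:
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static int is_err(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static int ptr_ret(const void *p)
{
	return is_err(p) ? (int)(intptr_t)p : 0;
}

int main(void)
{
	int ok_object;

	printf("%d\n", ptr_ret(&ok_object));			/* 0 */
	printf("%d\n", ptr_ret((void *)(intptr_t)-22));		/* -22 */
	return 0;
}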
/**
@@ -2700,16 +2544,16 @@ static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
SVGA3dCmdDXClearDepthStencilView body;
} *cmd = container_of(header, typeof(*cmd), header);
- return vmw_view_id_val_add(sw_context, vmw_view_ds,
- cmd->body.depthStencilViewId);
+ return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_ds,
+ cmd->body.depthStencilViewId));
}
static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
- struct vmw_resource_val_node *srf_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_resource *srf;
struct vmw_resource *res;
enum vmw_view_type view_type;
int ret;
@@ -2734,19 +2578,18 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cmd->sid, &srf_node);
+ &cmd->sid, &srf);
if (unlikely(ret != 0))
return ret;
- res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
+ res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
ret = vmw_cotable_notify(res, cmd->defined_id);
- vmw_resource_unreference(&res);
if (unlikely(ret != 0))
return ret;
return vmw_view_add(sw_context->man,
- ctx_node->res,
- srf_node->res,
+ ctx_node->ctx,
+ srf,
view_type,
cmd->defined_id,
header,
@@ -2766,9 +2609,9 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_ctx_bindinfo_so binding;
- struct vmw_resource_val_node *res_node;
+ struct vmw_resource *res;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetSOTargets body;
@@ -2793,18 +2636,18 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
for (i = 0; i < num; i++) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
- &cmd->targets[i].sid, &res_node);
+ &cmd->targets[i].sid, &res);
if (unlikely(ret != 0))
return ret;
- binding.bi.ctx = ctx_node->res;
- binding.bi.res = ((res_node) ? res_node->res : NULL);
+ binding.bi.ctx = ctx_node->ctx;
+ binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_so,
binding.offset = cmd->targets[i].offset;
binding.size = cmd->targets[i].sizeInBytes;
binding.slot = i;
- vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+ vmw_binding_add(ctx_node->staged, &binding.bi,
0, binding.slot);
}
@@ -2815,7 +2658,7 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *res;
/*
* This is based on the fact that all affected define commands have
@@ -2834,10 +2677,9 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
}
so_type = vmw_so_cmd_to_type(header->id);
- res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
+ res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cotable_notify(res, cmd->defined_id);
- vmw_resource_unreference(&res);
return ret;
}
@@ -2882,7 +2724,7 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
if (unlikely(ctx_node == NULL)) {
DRM_ERROR("DX Context not set.\n");
@@ -2907,7 +2749,7 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct {
SVGA3dCmdHeader header;
union vmw_view_destroy body;
@@ -2934,7 +2776,7 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
* relocation to conditionally make this command a NOP to avoid
* device errors.
*/
- return vmw_resource_relocation_add(&sw_context->res_relocations,
+ return vmw_resource_relocation_add(sw_context,
view,
vmw_ptr_diff(sw_context->buf_start,
&cmd->header.id),
@@ -2953,7 +2795,7 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct vmw_resource *res;
struct {
SVGA3dCmdHeader header;
@@ -2966,13 +2808,12 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
return -EINVAL;
}
- res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
+ res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
ret = vmw_cotable_notify(res, cmd->body.shaderId);
- vmw_resource_unreference(&res);
if (ret)
return ret;
- return vmw_dx_shader_add(sw_context->man, ctx_node->res,
+ return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
cmd->body.shaderId, cmd->body.type,
&sw_context->staged_cmd_res);
}
@@ -2989,7 +2830,7 @@ static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXDestroyShader body;
@@ -3021,8 +2862,7 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_resource_val_node *ctx_node;
- struct vmw_resource_val_node *res_node;
+ struct vmw_resource *ctx;
struct vmw_resource *res;
struct {
SVGA3dCmdHeader header;
@@ -3033,38 +2873,33 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
if (cmd->body.cid != SVGA3D_INVALID_ID) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter,
- &cmd->body.cid, &ctx_node);
+ &cmd->body.cid, &ctx);
if (ret)
return ret;
} else {
- ctx_node = sw_context->dx_ctx_node;
- if (!ctx_node) {
+ if (!sw_context->dx_ctx_node) {
DRM_ERROR("DX Context not set.\n");
return -EINVAL;
}
+ ctx = sw_context->dx_ctx_node->ctx;
}
- res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+ res = vmw_shader_lookup(vmw_context_res_man(ctx),
cmd->body.shid, 0);
if (IS_ERR(res)) {
DRM_ERROR("Could not find shader to bind.\n");
return PTR_ERR(res);
}
- ret = vmw_resource_val_add(sw_context, res, &res_node);
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
if (ret) {
DRM_ERROR("Error creating resource validation node.\n");
- goto out_unref;
+ return ret;
}
-
- ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
- &cmd->body.mobid,
- cmd->body.offsetInBytes);
-out_unref:
- vmw_resource_unreference(&res);
-
- return ret;
+ return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
+ &cmd->body.mobid,
+ cmd->body.offsetInBytes);
}
/**
@@ -3083,8 +2918,8 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
SVGA3dCmdDXGenMips body;
} *cmd = container_of(header, typeof(*cmd), header);
- return vmw_view_id_val_add(sw_context, vmw_view_sr,
- cmd->body.shaderResourceViewId);
+ return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_sr,
+ cmd->body.shaderResourceViewId));
}
/**
@@ -3638,20 +3473,18 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
- sw_context->cur_reloc = 0;
+ /* Memory is validation context memory, so no need to free it */
+
+ INIT_LIST_HEAD(&sw_context->bo_relocations);
}
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
- uint32_t i;
struct vmw_relocation *reloc;
- struct ttm_validate_buffer *validate;
struct ttm_buffer_object *bo;
- for (i = 0; i < sw_context->cur_reloc; ++i) {
- reloc = &sw_context->relocs[i];
- validate = &sw_context->val_bufs[reloc->index].base;
- bo = validate->bo;
+ list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
+ bo = &reloc->vbo->base;
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
reloc->location->offset += bo->offset;
@@ -3670,110 +3503,6 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
vmw_free_relocations(sw_context);
}
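/*
 * How a relocation is applied depends on where the buffer ended up after
 * validation. A simplified model of that dispatch; the placement names and
 * fields are invented here and do not match the TTM/vmwgfx structures.
 */
#include <stdint.h>
#include <stdio.h>

enum placement { PLACE_VRAM, PLACE_GMR, PLACE_MOB };

struct buffer {
	enum placement place;
	uint32_t offset;	/* byte offset, meaningful for VRAM */
	uint32_t start;		/* GMR/MOB id, meaningful otherwise */
};

struct guest_loc {		/* guest-pointer style location in the stream */
	uint32_t gmr_id;
	uint32_t offset;
};

static void apply(struct guest_loc *location, uint32_t *mob_loc,
		  const struct buffer *bo)
{
	switch (bo->place) {
	case PLACE_VRAM:
		location->offset += bo->offset;		/* rebase into VRAM */
		break;
	case PLACE_GMR:
		location->gmr_id = bo->start;		/* point at the GMR */
		break;
	case PLACE_MOB:
		*mob_loc = bo->start;			/* write the MOB id */
		break;
	}
}

int main(void)
{
	struct buffer bo = { .place = PLACE_MOB, .start = 7 };
	struct guest_loc location = { 0 };
	uint32_t mob_id = 0;

	apply(&location, &mob_id, &bo);
	printf("mob id: %u\n", mob_id);		/* 7 */
	return 0;
}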
-/**
- * vmw_resource_list_unrefererence - Free up a resource list and unreference
- * all resources referenced by it.
- *
- * @list: The resource list.
- */
-static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
- struct list_head *list)
-{
- struct vmw_resource_val_node *val, *val_next;
-
- /*
- * Drop references to resources held during command submission.
- */
-
- list_for_each_entry_safe(val, val_next, list, head) {
- list_del_init(&val->head);
- vmw_resource_unreference(&val->res);
-
- if (val->staged_bindings) {
- if (val->staged_bindings != sw_context->staged_bindings)
- vmw_binding_state_free(val->staged_bindings);
- else
- sw_context->staged_bindings_inuse = false;
- val->staged_bindings = NULL;
- }
-
- kfree(val);
- }
-}
-
-static void vmw_clear_validations(struct vmw_sw_context *sw_context)
-{
- struct vmw_validate_buffer *entry, *next;
- struct vmw_resource_val_node *val;
-
- /*
- * Drop references to DMA buffers held during command submission.
- */
- list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
- base.head) {
- list_del(&entry->base.head);
- ttm_bo_unref(&entry->base.bo);
- (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
- sw_context->cur_val_buf--;
- }
- BUG_ON(sw_context->cur_val_buf != 0);
-
- list_for_each_entry(val, &sw_context->resource_list, head)
- (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
-}
-
-int vmw_validate_single_buffer(struct vmw_private *dev_priv,
- struct ttm_buffer_object *bo,
- bool interruptible,
- bool validate_as_mob)
-{
- struct vmw_buffer_object *vbo =
- container_of(bo, struct vmw_buffer_object, base);
- struct ttm_operation_ctx ctx = { interruptible, false };
- int ret;
-
- if (vbo->pin_count > 0)
- return 0;
-
- if (validate_as_mob)
- return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
-
- /**
- * Put BO in VRAM if there is space, otherwise as a GMR.
- * If there is no space in VRAM and GMR ids are all used up,
- * start evicting GMRs to make room. If the DMA buffer can't be
- * used as a GMR, this will return -ENOMEM.
- */
-
- ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
- if (likely(ret == 0 || ret == -ERESTARTSYS))
- return ret;
-
- /**
- * If that failed, try VRAM again, this time evicting
- * previous contents.
- */
-
- ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
- return ret;
-}
-
-static int vmw_validate_buffers(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context)
-{
- struct vmw_validate_buffer *entry;
- int ret;
-
- list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
- ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
- true,
- entry->validate_as_mob);
- if (unlikely(ret != 0))
- return ret;
- }
- return 0;
-}
-
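/*
 * The per-submission arrays and loops removed above are superseded by the
 * generic validation context used later in this patch
 * (vmw_validation_bo_reserve(), vmw_validation_bo_validate() and
 * vmw_validation_res_validate()). A toy two-phase reserve-then-validate
 * model with invented names; the real code locks buffers with backoff and
 * calls ttm_bo_validate() with real placements.
 */
#include <stdbool.h>
#include <stdio.h>

#define NBUF 3

struct bo {
	bool reserved;
	bool validated;		/* placed somewhere the device can reach */
};

static int reserve_all(struct bo *b, int n)
{
	for (int i = 0; i < n; i++)
		b[i].reserved = true;
	return 0;
}

static int validate_all(struct bo *b, int n)
{
	for (int i = 0; i < n; i++) {
		if (!b[i].reserved)
			return -1;	/* must be reserved first */
		b[i].validated = true;
	}
	return 0;
}

int main(void)
{
	struct bo bufs[NBUF] = { { false, false } };

	if (reserve_all(bufs, NBUF) == 0 && validate_all(bufs, NBUF) == 0)
		printf("all %d buffers reserved and validated\n", NBUF);
	return 0;
}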
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
uint32_t size)
{
@@ -3946,7 +3675,7 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
if (sw_context->dx_ctx_node)
cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
- sw_context->dx_ctx_node->res->id);
+ sw_context->dx_ctx_node->ctx->id);
else
cmd = vmw_fifo_reserve(dev_priv, command_size);
if (!cmd) {
@@ -3980,7 +3709,7 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
u32 command_size,
struct vmw_sw_context *sw_context)
{
- u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
+ u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
SVGA3D_INVALID_ID);
void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
id, false, header);
@@ -4057,31 +3786,35 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
uint32_t handle)
{
- struct vmw_resource_val_node *ctx_node;
struct vmw_resource *res;
int ret;
+ unsigned int size;
if (handle == SVGA3D_INVALID_ID)
return 0;
- ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
- handle, user_context_converter,
- &res);
- if (unlikely(ret != 0)) {
+ size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
+ ret = vmw_validation_preload_res(sw_context->ctx, size);
+ if (ret)
+ return ret;
+
+ res = vmw_user_resource_noref_lookup_handle
+ (dev_priv, sw_context->fp->tfile, handle,
+ user_context_converter);
+ if (unlikely(IS_ERR(res))) {
DRM_ERROR("Could not find or user DX context 0x%08x.\n",
(unsigned) handle);
- return ret;
+ return PTR_ERR(res);
}
- ret = vmw_resource_val_add(sw_context, res, &ctx_node);
+ ret = vmw_execbuf_res_noref_val_add(sw_context, res);
if (unlikely(ret != 0))
- goto out_err;
+ return ret;
- sw_context->dx_ctx_node = ctx_node;
+ sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
sw_context->man = vmw_context_res_man(res);
-out_err:
- vmw_resource_unreference(&res);
- return ret;
+
+ return 0;
}
int vmw_execbuf_process(struct drm_file *file_priv,
@@ -4097,15 +3830,12 @@ int vmw_execbuf_process(struct drm_file *file_priv,
{
struct vmw_sw_context *sw_context = &dev_priv->ctx;
struct vmw_fence_obj *fence = NULL;
- struct vmw_resource *error_resource;
- struct list_head resource_list;
struct vmw_cmdbuf_header *header;
- struct ww_acquire_ctx ticket;
uint32_t handle;
int ret;
int32_t out_fence_fd = -1;
struct sync_file *sync_file = NULL;
-
+ DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
@@ -4157,10 +3887,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->kernel = true;
sw_context->fp = vmw_fpriv(file_priv);
- sw_context->cur_reloc = 0;
- sw_context->cur_val_buf = 0;
- INIT_LIST_HEAD(&sw_context->resource_list);
- INIT_LIST_HEAD(&sw_context->ctx_resource_list);
+ INIT_LIST_HEAD(&sw_context->ctx_list);
sw_context->cur_query_bo = dev_priv->pinned_bo;
sw_context->last_query_ctx = NULL;
sw_context->needs_post_query_barrier = false;
@@ -4168,8 +3895,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->dx_query_mob = NULL;
sw_context->dx_query_ctx = NULL;
memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
- INIT_LIST_HEAD(&sw_context->validate_nodes);
INIT_LIST_HEAD(&sw_context->res_relocations);
+ INIT_LIST_HEAD(&sw_context->bo_relocations);
if (sw_context->staged_bindings)
vmw_binding_state_reset(sw_context->staged_bindings);
@@ -4180,24 +3907,13 @@ int vmw_execbuf_process(struct drm_file *file_priv,
sw_context->res_ht_initialized = true;
}
INIT_LIST_HEAD(&sw_context->staged_cmd_res);
- INIT_LIST_HEAD(&resource_list);
+ sw_context->ctx = &val_ctx;
ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
- if (unlikely(ret != 0)) {
- list_splice_init(&sw_context->ctx_resource_list,
- &sw_context->resource_list);
+ if (unlikely(ret != 0))
goto out_err_nores;
- }
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
- /*
- * Merge the resource lists before checking the return status
- * from vmd_cmd_check_all so that all the open hashtabs will
- * be handled properly even if vmw_cmd_check_all fails.
- */
- list_splice_init(&sw_context->ctx_resource_list,
- &sw_context->resource_list);
-
if (unlikely(ret != 0))
goto out_err_nores;
@@ -4205,18 +3921,18 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (unlikely(ret != 0))
goto out_err_nores;
- ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
- true, NULL);
+ ret = vmw_validation_bo_reserve(&val_ctx, true);
if (unlikely(ret != 0))
goto out_err_nores;
- ret = vmw_validate_buffers(dev_priv, sw_context);
+ ret = vmw_validation_bo_validate(&val_ctx, true);
if (unlikely(ret != 0))
goto out_err;
- ret = vmw_resources_validate(sw_context);
+ ret = vmw_validation_res_validate(&val_ctx, true);
if (unlikely(ret != 0))
goto out_err;
+ vmw_validation_drop_ht(&val_ctx);
ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
if (unlikely(ret != 0)) {
@@ -4255,17 +3971,16 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (ret != 0)
DRM_ERROR("Fence submission error. Syncing.\n");
- vmw_resources_unreserve(sw_context, false);
+ vmw_execbuf_bindings_commit(sw_context, false);
+ vmw_bind_dx_query_mob(sw_context);
+ vmw_validation_res_unreserve(&val_ctx, false);
- ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
- (void *) fence);
+ vmw_validation_bo_fence(sw_context->ctx, fence);
if (unlikely(dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid))
__vmw_execbuf_release_pinned_bo(dev_priv, fence);
- vmw_clear_validations(sw_context);
-
/*
* If anything fails here, give up trying to export the fence
* and do a sync since the user mode will not be able to sync
@@ -4300,7 +4015,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_fence_obj_unreference(&fence);
}
- list_splice_init(&sw_context->resource_list, &resource_list);
vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
mutex_unlock(&dev_priv->cmdbuf_mutex);
@@ -4308,36 +4022,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
* Unreference resources outside of the cmdbuf_mutex to
* avoid deadlocks in resource destruction paths.
*/
- vmw_resource_list_unreference(sw_context, &resource_list);
+ vmw_validation_unref_lists(&val_ctx);
return 0;
out_unlock_binding:
mutex_unlock(&dev_priv->binding_mutex);
out_err:
- ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
+ vmw_validation_bo_backoff(&val_ctx);
out_err_nores:
- vmw_resources_unreserve(sw_context, true);
+ vmw_execbuf_bindings_commit(sw_context, true);
+ vmw_validation_res_unreserve(&val_ctx, true);
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context);
- vmw_clear_validations(sw_context);
if (unlikely(dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid))
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
- list_splice_init(&sw_context->resource_list, &resource_list);
- error_resource = sw_context->error_resource;
- sw_context->error_resource = NULL;
vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
+ vmw_validation_drop_ht(&val_ctx);
+ WARN_ON(!list_empty(&sw_context->ctx_list));
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
* Unreference resources outside of the cmdbuf_mutex to
* avoid deadlocks in resource destruction paths.
*/
- vmw_resource_list_unreference(sw_context, &resource_list);
- if (unlikely(error_resource != NULL))
- vmw_resource_unreference(&error_resource);
+ vmw_validation_unref_lists(&val_ctx);
out_free_header:
if (header)
vmw_cmdbuf_header_free(header);
@@ -4398,38 +4109,31 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
struct vmw_fence_obj *fence)
{
int ret = 0;
- struct list_head validate_list;
- struct ttm_validate_buffer pinned_val, query_val;
struct vmw_fence_obj *lfence = NULL;
- struct ww_acquire_ctx ticket;
+ DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
if (dev_priv->pinned_bo == NULL)
goto out_unlock;
- INIT_LIST_HEAD(&validate_list);
-
- pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
- pinned_val.shared = false;
- list_add_tail(&pinned_val.head, &validate_list);
+ ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
+ false);
+ if (ret)
+ goto out_no_reserve;
- query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
- query_val.shared = false;
- list_add_tail(&query_val.head, &validate_list);
+ ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
+ false);
+ if (ret)
+ goto out_no_reserve;
- ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
- false, NULL);
- if (unlikely(ret != 0)) {
- vmw_execbuf_unpin_panic(dev_priv);
+ ret = vmw_validation_bo_reserve(&val_ctx, false);
+ if (ret)
goto out_no_reserve;
- }
if (dev_priv->query_cid_valid) {
BUG_ON(fence != NULL);
ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
- if (unlikely(ret != 0)) {
- vmw_execbuf_unpin_panic(dev_priv);
+ if (ret)
goto out_no_emit;
- }
dev_priv->query_cid_valid = false;
}
@@ -4443,22 +4147,22 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
NULL);
fence = lfence;
}
- ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
+ vmw_validation_bo_fence(&val_ctx, fence);
if (lfence != NULL)
vmw_fence_obj_unreference(&lfence);
- ttm_bo_unref(&query_val.bo);
- ttm_bo_unref(&pinned_val.bo);
+ vmw_validation_unref_lists(&val_ctx);
vmw_bo_unreference(&dev_priv->pinned_bo);
out_unlock:
return;
out_no_emit:
- ttm_eu_backoff_reservation(&ticket, &validate_list);
+ vmw_validation_bo_backoff(&val_ctx);
out_no_reserve:
- ttm_bo_unref(&query_val.bo);
- ttm_bo_unref(&pinned_val.bo);
+ vmw_validation_unref_lists(&val_ctx);
+ vmw_execbuf_unpin_panic(dev_priv);
vmw_bo_unreference(&dev_priv->pinned_bo);
+
}
/**
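For orientation, the execbuf hunks above boil down to a fixed ordering of validation-context calls on the success path of vmw_execbuf_process(). The skeleton below is an illustrative sketch only, not part of the patch; all error handling, the fence/sync-file details and command checking are elided, and every identifier is taken from the hunks in this series:

/* Illustrative sketch -- see vmw_execbuf_process() above for the real code. */
DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);

sw_context->ctx = &val_ctx;
ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
/* ... vmw_cmd_check_all() and query-bo switching ... */
ret = vmw_validation_bo_reserve(&val_ctx, true);   /* reserve all BOs */
ret = vmw_validation_bo_validate(&val_ctx, true);  /* place/validate BOs */
ret = vmw_validation_res_validate(&val_ctx, true); /* validate resources */
vmw_validation_drop_ht(&val_ctx);                  /* dedup ht no longer needed */
/* ... FIFO/cmdbuf submission and fence creation ... */
vmw_execbuf_bindings_commit(sw_context, false);
vmw_bind_dx_query_mob(sw_context);
vmw_validation_res_unreserve(&val_ctx, false);     /* commit backup switches */
vmw_validation_bo_fence(&val_ctx, fence);          /* fence and unreserve BOs */
vmw_validation_unref_lists(&val_ctx);              /* drop refs, free nodes */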
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 3d546d409334..f87261545f2c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -306,7 +306,8 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
INIT_LIST_HEAD(&fman->cleanup_list);
INIT_WORK(&fman->work, &vmw_fence_work_func);
fman->fifo_down = true;
- fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
+ fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
+ TTM_OBJ_EXTRA_SIZE;
fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
fman->event_fence_action_size =
ttm_round_pot(sizeof(struct vmw_event_fence_action));
@@ -650,7 +651,7 @@ int vmw_user_fence_create(struct drm_file *file_priv,
}
*p_fence = &ufence->fence;
- *p_handle = ufence->base.hash.key;
+ *p_handle = ufence->base.handle;
return 0;
out_err:
@@ -1137,7 +1138,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
"object.\n");
goto out_no_ref_obj;
}
- handle = base->hash.key;
+ handle = base->handle;
}
ttm_base_object_unref(&base);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 292e48feba83..dca04d4246ea 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2575,88 +2575,31 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
}
/**
- * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
- * command submission.
- *
- * @dev_priv. Pointer to a device private structure.
- * @buf: The buffer object
- * @interruptible: Whether to perform waits as interruptible.
- * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
- * The buffer will be validated as a GMR. Already pinned buffers will not be
- * validated.
- *
- * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
- * interrupted by a signal.
+ * vmw_kms_helper_validation_finish - Helper for post KMS command submission
+ * cleanup and fencing
+ * @dev_priv: Pointer to the device-private struct
+ * @file_priv: Pointer identifying the client when user-space fencing is used
+ * @ctx: Pointer to the validation context
+ * @out_fence: If non-NULL, returned refcounted fence-pointer
+ * @user_fence_rep: If non-NULL, pointer to user-space address area
+ * in which to copy user-space fence info
*/
-int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buf,
- bool interruptible,
- bool validate_as_mob,
- bool for_cpu_blit)
-{
- struct ttm_operation_ctx ctx = {
- .interruptible = interruptible,
- .no_wait_gpu = false};
- struct ttm_buffer_object *bo = &buf->base;
- int ret;
-
- ttm_bo_reserve(bo, false, false, NULL);
- if (for_cpu_blit)
- ret = ttm_bo_validate(bo, &vmw_nonfixed_placement, &ctx);
- else
- ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
- validate_as_mob);
- if (ret)
- ttm_bo_unreserve(bo);
-
- return ret;
-}
-
-/**
- * vmw_kms_helper_buffer_revert - Undo the actions of
- * vmw_kms_helper_buffer_prepare.
- *
- * @res: Pointer to the buffer object.
- *
- * Helper to be used if an error forces the caller to undo the actions of
- * vmw_kms_helper_buffer_prepare.
- */
-void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf)
-{
- if (buf)
- ttm_bo_unreserve(&buf->base);
-}
-
-/**
- * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
- * kms command submission.
- *
- * @dev_priv: Pointer to a device private structure.
- * @file_priv: Pointer to a struct drm_file representing the caller's
- * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
- * if non-NULL, @user_fence_rep must be non-NULL.
- * @buf: The buffer object.
- * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
- * ref-counted fence pointer is returned here.
- * @user_fence_rep: Optional pointer to a user-space provided struct
- * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
- * function copies fence data to user-space in a fail-safe manner.
- */
-void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
- struct vmw_buffer_object *buf,
- struct vmw_fence_obj **out_fence,
- struct drm_vmw_fence_rep __user *
- user_fence_rep)
-{
- struct vmw_fence_obj *fence;
+void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_validation_context *ctx,
+ struct vmw_fence_obj **out_fence,
+ struct drm_vmw_fence_rep __user *
+ user_fence_rep)
+{
+ struct vmw_fence_obj *fence = NULL;
uint32_t handle;
int ret;
- ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
- file_priv ? &handle : NULL);
- if (buf)
- vmw_bo_fence_single(&buf->base, fence);
+ if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
+ out_fence)
+ ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
+ file_priv ? &handle : NULL);
+ vmw_validation_done(ctx, fence);
if (file_priv)
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
ret, user_fence_rep, fence,
@@ -2665,106 +2608,6 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
*out_fence = fence;
else
vmw_fence_obj_unreference(&fence);
-
- vmw_kms_helper_buffer_revert(buf);
-}
-
-
-/**
- * vmw_kms_helper_resource_revert - Undo the actions of
- * vmw_kms_helper_resource_prepare.
- *
- * @res: Pointer to the resource. Typically a surface.
- *
- * Helper to be used if an error forces the caller to undo the actions of
- * vmw_kms_helper_resource_prepare.
- */
-void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
-{
- struct vmw_resource *res = ctx->res;
-
- vmw_kms_helper_buffer_revert(ctx->buf);
- vmw_bo_unreference(&ctx->buf);
- vmw_resource_unreserve(res, false, NULL, 0);
- mutex_unlock(&res->dev_priv->cmdbuf_mutex);
-}
-
-/**
- * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
- * command submission.
- *
- * @res: Pointer to the resource. Typically a surface.
- * @interruptible: Whether to perform waits as interruptible.
- *
- * Reserves and validates also the backup buffer if a guest-backed resource.
- * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
- * interrupted by a signal.
- */
-int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
- bool interruptible,
- struct vmw_validation_ctx *ctx)
-{
- int ret = 0;
-
- ctx->buf = NULL;
- ctx->res = res;
-
- if (interruptible)
- ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
- else
- mutex_lock(&res->dev_priv->cmdbuf_mutex);
-
- if (unlikely(ret != 0))
- return -ERESTARTSYS;
-
- ret = vmw_resource_reserve(res, interruptible, false);
- if (ret)
- goto out_unlock;
-
- if (res->backup) {
- ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
- interruptible,
- res->dev_priv->has_mob,
- false);
- if (ret)
- goto out_unreserve;
-
- ctx->buf = vmw_bo_reference(res->backup);
- }
- ret = vmw_resource_validate(res);
- if (ret)
- goto out_revert;
- return 0;
-
-out_revert:
- vmw_kms_helper_buffer_revert(ctx->buf);
-out_unreserve:
- vmw_resource_unreserve(res, false, NULL, 0);
-out_unlock:
- mutex_unlock(&res->dev_priv->cmdbuf_mutex);
- return ret;
-}
-
-/**
- * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
- * kms command submission.
- *
- * @res: Pointer to the resource. Typically a surface.
- * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
- * ref-counted fence pointer is returned here.
- */
-void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
- struct vmw_fence_obj **out_fence)
-{
- struct vmw_resource *res = ctx->res;
-
- if (ctx->buf || out_fence)
- vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
- out_fence, NULL);
-
- vmw_bo_unreference(&ctx->buf);
- vmw_resource_unreserve(res, false, NULL, 0);
- mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
/**
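The buffer- and resource-prepare/finish helpers removed above are replaced by the validation context throughout KMS (see the vmwgfx_scrn.c and vmwgfx_stdu.c hunks further down). A minimal sketch of the new surface-dirty style flow, using only calls added by this patch, might look like this:

/* Illustrative sketch -- mirrors vmw_kms_sou_do_surface_dirty() below. */
DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
int ret;

ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
if (ret)
	return ret;

ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
if (ret)
	goto out_unref;

/* ... emit dirty/present commands ... */

vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence, NULL);
return ret;

out_unref:
vmw_validation_unref_lists(&val_ctx);
return ret;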
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 31311298ec0b..76ec570c0684 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -308,24 +308,12 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
int increment,
struct vmw_kms_dirty *dirty);
-int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buf,
- bool interruptible,
- bool validate_as_mob,
- bool for_cpu_blit);
-void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf);
-void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
- struct vmw_buffer_object *buf,
- struct vmw_fence_obj **out_fence,
- struct drm_vmw_fence_rep __user *
- user_fence_rep);
-int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
- bool interruptible,
- struct vmw_validation_ctx *ctx);
-void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx);
-void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
- struct vmw_fence_obj **out_fence);
+void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_validation_context *ctx,
+ struct vmw_fence_obj **out_fence,
+ struct drm_vmw_fence_rep __user *
+ user_fence_rep);
int vmw_kms_readback(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
index 0861c821a7fe..e420675e8db3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -31,8 +31,8 @@
*/
#include "vmwgfx_drv.h"
+#include "ttm_object.h"
#include <linux/dma-buf.h>
-#include <drm/ttm/ttm_object.h>
/*
* DMA-BUF attach- and mapping methods. No need to implement
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 92003ea5a219..8a029bade32a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -58,11 +58,11 @@ void vmw_resource_release_id(struct vmw_resource *res)
struct vmw_private *dev_priv = res->dev_priv;
struct idr *idr = &dev_priv->res_idr[res->func->res_type];
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
if (res->id != -1)
idr_remove(idr, res->id);
res->id = -1;
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
}
static void vmw_resource_release(struct kref *kref)
@@ -73,10 +73,9 @@ static void vmw_resource_release(struct kref *kref)
int id;
struct idr *idr = &dev_priv->res_idr[res->func->res_type];
- write_lock(&dev_priv->resource_lock);
- res->avail = false;
+ spin_lock(&dev_priv->resource_lock);
list_del_init(&res->lru_head);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
if (res->backup) {
struct ttm_buffer_object *bo = &res->backup->base;
@@ -108,10 +107,10 @@ static void vmw_resource_release(struct kref *kref)
else
kfree(res);
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
if (id != -1)
idr_remove(idr, id);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
}
void vmw_resource_unreference(struct vmw_resource **p_res)
@@ -140,13 +139,13 @@ int vmw_resource_alloc_id(struct vmw_resource *res)
BUG_ON(res->id != -1);
idr_preload(GFP_KERNEL);
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
if (ret >= 0)
res->id = ret;
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
idr_preload_end();
return ret < 0 ? ret : 0;
}
@@ -170,7 +169,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
kref_init(&res->kref);
res->hw_destroy = NULL;
res->res_free = res_free;
- res->avail = false;
res->dev_priv = dev_priv;
res->func = func;
INIT_LIST_HEAD(&res->lru_head);
@@ -187,28 +185,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
return vmw_resource_alloc_id(res);
}
-/**
- * vmw_resource_activate
- *
- * @res: Pointer to the newly created resource
- * @hw_destroy: Destroy function. NULL if none.
- *
- * Activate a resource after the hardware has been made aware of it.
- * Set tye destroy function to @destroy. Typically this frees the
- * resource and destroys the hardware resources associated with it.
- * Activate basically means that the function vmw_resource_lookup will
- * find it.
- */
-void vmw_resource_activate(struct vmw_resource *res,
- void (*hw_destroy) (struct vmw_resource *))
-{
- struct vmw_private *dev_priv = res->dev_priv;
-
- write_lock(&dev_priv->resource_lock);
- res->avail = true;
- res->hw_destroy = hw_destroy;
- write_unlock(&dev_priv->resource_lock);
-}
/**
* vmw_user_resource_lookup_handle - lookup a struct resource from a
@@ -243,15 +219,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
goto out_bad_resource;
res = converter->base_obj_to_res(base);
-
- read_lock(&dev_priv->resource_lock);
- if (!res->avail || res->res_free != converter->res_free) {
- read_unlock(&dev_priv->resource_lock);
- goto out_bad_resource;
- }
-
kref_get(&res->kref);
- read_unlock(&dev_priv->resource_lock);
*p_res = res;
ret = 0;
@@ -263,6 +231,41 @@ out_bad_resource:
}
/**
+ * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
+ * TTM user-space handle and perform basic type checks
+ *
+ * @dev_priv: Pointer to a device private struct
+ * @tfile: Pointer to a struct ttm_object_file identifying the caller
+ * @handle: The TTM user-space handle
+ * @converter: Pointer to an object describing the resource type
+ *
+ * If the handle can't be found, ERR_PTR(-ESRCH) is returned. If it is
+ * associated with an incorrect resource type, ERR_PTR(-EINVAL) is returned.
+ * On success, a pointer to the resource is returned without an extra
+ * reference being taken.
+ */
+struct vmw_resource *
+vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ const struct vmw_user_resource_conv
+ *converter)
+{
+ struct ttm_base_object *base;
+
+ base = ttm_base_object_noref_lookup(tfile, handle);
+ if (!base)
+ return ERR_PTR(-ESRCH);
+
+ if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
+ ttm_base_object_noref_release();
+ return ERR_PTR(-EINVAL);
+ }
+
+ return converter->base_obj_to_res(base);
+}
+
+/**
* Helper function that looks either a surface or bo.
*
* The pointer this pointed at by out_surf and out_buf needs to be null.
@@ -422,10 +425,10 @@ void vmw_resource_unreserve(struct vmw_resource *res,
if (!res->func->may_evict || res->id == -1 || res->pin_count)
return;
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
list_add_tail(&res->lru_head,
&res->dev_priv->res_lru[res->func->res_type]);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
}
/**
@@ -504,9 +507,9 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
struct vmw_private *dev_priv = res->dev_priv;
int ret;
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
list_del_init(&res->lru_head);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
if (res->func->needs_backup && res->backup == NULL &&
!no_backup) {
@@ -587,15 +590,18 @@ out_no_unbind:
/**
* vmw_resource_validate - Make a resource up-to-date and visible
* to the device.
- *
- * @res: The resource to make visible to the device.
+ * @res: The resource to make visible to the device.
+ * @intr: Perform waits interruptible if possible.
*
* On succesful return, any backup DMA buffer pointed to by @res->backup will
* be reserved and validated.
* On hardware resource shortage, this function will repeatedly evict
* resources of the same type until the validation succeeds.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
+ * on failure.
*/
-int vmw_resource_validate(struct vmw_resource *res)
+int vmw_resource_validate(struct vmw_resource *res, bool intr)
{
int ret;
struct vmw_resource *evict_res;
@@ -616,12 +622,12 @@ int vmw_resource_validate(struct vmw_resource *res)
if (likely(ret != -EBUSY))
break;
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
if (list_empty(lru_list) || !res->func->may_evict) {
DRM_ERROR("Out of device device resources "
"for %s.\n", res->func->type_name);
ret = -EBUSY;
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
break;
}
@@ -630,14 +636,14 @@ int vmw_resource_validate(struct vmw_resource *res)
lru_head));
list_del_init(&evict_res->lru_head);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
/* Trylock backup buffers with a NULL ticket. */
- ret = vmw_resource_do_evict(NULL, evict_res, true);
+ ret = vmw_resource_do_evict(NULL, evict_res, intr);
if (unlikely(ret != 0)) {
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
list_add_tail(&evict_res->lru_head, lru_list);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
if (ret == -ERESTARTSYS ||
++err_count > VMW_RES_EVICT_ERR_COUNT) {
vmw_resource_unreference(&evict_res);
@@ -819,7 +825,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
struct ww_acquire_ctx ticket;
do {
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
if (list_empty(lru_list))
goto out_unlock;
@@ -828,14 +834,14 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
list_first_entry(lru_list, struct vmw_resource,
lru_head));
list_del_init(&evict_res->lru_head);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
/* Wait lock backup buffers with a ticket. */
ret = vmw_resource_do_evict(&ticket, evict_res, false);
if (unlikely(ret != 0)) {
- write_lock(&dev_priv->resource_lock);
+ spin_lock(&dev_priv->resource_lock);
list_add_tail(&evict_res->lru_head, lru_list);
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
vmw_resource_unreference(&evict_res);
return;
@@ -846,7 +852,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
} while (1);
out_unlock:
- write_unlock(&dev_priv->resource_lock);
+ spin_unlock(&dev_priv->resource_lock);
}
/**
@@ -914,7 +920,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
/* Do we really need to pin the MOB as well? */
vmw_bo_pin_reserved(vbo, true);
}
- ret = vmw_resource_validate(res);
+ ret = vmw_resource_validate(res, interruptible);
if (vbo)
ttm_bo_unreserve(&vbo->base);
if (ret)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
index a8c1c5ebd71d..7e19eba0b0b8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -30,6 +30,11 @@
#include "vmwgfx_drv.h"
+/*
+ * Extra memory required by the resource id's ida storage, which is allocated
+ * separately from the base object itself. We estimate an on-average 128 bytes
+ * per ida.
+ */
#define VMW_IDA_ACC_SIZE 128
enum vmw_cmdbuf_res_state {
@@ -120,8 +125,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
bool delay_id,
void (*res_free) (struct vmw_resource *res),
const struct vmw_res_func *func);
-void vmw_resource_activate(struct vmw_resource *res,
- void (*hw_destroy) (struct vmw_resource *));
int
vmw_simple_resource_create_ioctl(struct drm_device *dev,
void *data,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index ad0de7f0cd60..53316b1bda3d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -946,16 +946,20 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer_surface *vfbs =
container_of(framebuffer, typeof(*vfbs), base);
struct vmw_kms_sou_surface_dirty sdirty;
- struct vmw_validation_ctx ctx;
+ DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
int ret;
if (!srf)
srf = &vfbs->surface->res;
- ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
+ ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
if (ret)
return ret;
+ ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
+ if (ret)
+ goto out_unref;
+
sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
sdirty.base.clip = vmw_sou_surface_clip;
sdirty.base.dev_priv = dev_priv;
@@ -972,9 +976,14 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
dest_x, dest_y, num_clips, inc,
&sdirty.base);
- vmw_kms_helper_resource_finish(&ctx, out_fence);
+ vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
+ NULL);
return ret;
+
+out_unref:
+ vmw_validation_unref_lists(&val_ctx);
+ return ret;
}
/**
@@ -1051,13 +1060,17 @@ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
container_of(framebuffer, struct vmw_framebuffer_bo,
base)->buffer;
struct vmw_kms_dirty dirty;
+ DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
int ret;
- ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
- false, false);
+ ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
if (ret)
return ret;
+ ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
+ if (ret)
+ goto out_unref;
+
ret = do_bo_define_gmrfb(dev_priv, framebuffer);
if (unlikely(ret != 0))
goto out_revert;
@@ -1069,12 +1082,15 @@ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
num_clips;
ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
0, 0, num_clips, increment, &dirty);
- vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);
+ vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
+ NULL);
return ret;
out_revert:
- vmw_kms_helper_buffer_revert(buf);
+ vmw_validation_revert(&val_ctx);
+out_unref:
+ vmw_validation_unref_lists(&val_ctx);
return ret;
}
@@ -1150,13 +1166,17 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
struct vmw_buffer_object *buf =
container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
struct vmw_kms_dirty dirty;
+ DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
int ret;
- ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false,
- false);
+ ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
if (ret)
return ret;
+ ret = vmw_validation_prepare(&val_ctx, NULL, true);
+ if (ret)
+ goto out_unref;
+
ret = do_bo_define_gmrfb(dev_priv, vfb);
if (unlikely(ret != 0))
goto out_revert;
@@ -1168,13 +1188,15 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
num_clips;
ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
0, 0, num_clips, 1, &dirty);
- vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
- user_fence_rep);
+ vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL,
+ user_fence_rep);
return ret;
out_revert:
- vmw_kms_helper_buffer_revert(buf);
-
+ vmw_validation_revert(&val_ctx);
+out_unref:
+ vmw_validation_unref_lists(&val_ctx);
+
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index fe4842ca3b6e..bf32fe446219 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -186,7 +186,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
shader->num_input_sig = num_input_sig;
shader->num_output_sig = num_output_sig;
- vmw_resource_activate(res, vmw_hw_shader_destroy);
+ res->hw_destroy = vmw_hw_shader_destroy;
return 0;
}
@@ -562,7 +562,7 @@ void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
{
struct vmw_dx_shader *entry, *next;
- WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+ lockdep_assert_held_once(&dev_priv->binding_mutex);
list_for_each_entry_safe(entry, next, list, cotable_head) {
WARN_ON(vmw_dx_shader_scrub(&entry->res));
@@ -636,7 +636,8 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
res = &shader->res;
shader->ctx = ctx;
- shader->cotable = vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER);
+ shader->cotable = vmw_resource_reference
+ (vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER));
shader->id = user_key;
shader->committed = false;
INIT_LIST_HEAD(&shader->cotable_head);
@@ -656,7 +657,7 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
goto out_resource_init;
res->id = shader->id;
- vmw_resource_activate(res, vmw_hw_shader_destroy);
+ res->hw_destroy = vmw_hw_shader_destroy;
out_resource_init:
vmw_resource_unreference(&res);
@@ -740,13 +741,10 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
};
int ret;
- /*
- * Approximate idr memory usage with 128 bytes. It will be limited
- * by maximum number_of shaders anyway.
- */
if (unlikely(vmw_user_shader_size == 0))
vmw_user_shader_size =
- ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
+ ttm_round_pot(sizeof(struct vmw_user_shader)) +
+ VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_shader_size,
@@ -792,7 +790,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
}
if (handle)
- *handle = ushader->base.hash.key;
+ *handle = ushader->base.handle;
out_err:
vmw_resource_unreference(&res);
out:
@@ -814,13 +812,10 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
};
int ret;
- /*
- * Approximate idr memory usage with 128 bytes. It will be limited
- * by maximum number_of shaders anyway.
- */
if (unlikely(vmw_shader_size == 0))
vmw_shader_size =
- ttm_round_pot(sizeof(struct vmw_shader)) + 128;
+ ttm_round_pot(sizeof(struct vmw_shader)) +
+ VMW_IDA_ACC_SIZE;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_shader_size,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
index 6ebc5affde14..6a6865384e91 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
@@ -81,7 +81,7 @@ static int vmw_simple_resource_init(struct vmw_private *dev_priv,
return ret;
}
- vmw_resource_activate(&simple->res, simple->func->hw_destroy);
+ simple->res.hw_destroy = simple->func->hw_destroy;
return 0;
}
@@ -159,7 +159,8 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
alloc_size = offsetof(struct vmw_user_simple_resource, simple) +
func->size;
- account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE;
+ account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE +
+ TTM_OBJ_EXTRA_SIZE;
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (ret)
@@ -208,7 +209,7 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
goto out_err;
}
- func->set_arg_handle(data, usimple->base.hash.key);
+ func->set_arg_handle(data, usimple->base.handle);
out_err:
vmw_resource_unreference(&res);
out_ret:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index e9b6b7baa009..bc8bb690f1ea 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -208,7 +208,7 @@ static int vmw_view_destroy(struct vmw_resource *res)
union vmw_view_destroy body;
} *cmd;
- WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+ lockdep_assert_held_once(&dev_priv->binding_mutex);
vmw_binding_res_list_scrub(&res->binding_head);
if (!view->committed || res->id == -1)
@@ -366,7 +366,8 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
res = &view->res;
view->ctx = ctx;
view->srf = vmw_resource_reference(srf);
- view->cotable = vmw_context_cotable(ctx, vmw_view_cotables[view_type]);
+ view->cotable = vmw_resource_reference
+ (vmw_context_cotable(ctx, vmw_view_cotables[view_type]));
view->view_type = view_type;
view->view_id = user_key;
view->cmd_size = cmd_size;
@@ -386,7 +387,7 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
goto out_resource_init;
res->id = view->view_id;
- vmw_resource_activate(res, vmw_hw_view_destroy);
+ res->hw_destroy = vmw_hw_view_destroy;
out_resource_init:
vmw_resource_unreference(&res);
@@ -439,7 +440,7 @@ void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
{
struct vmw_view *entry, *next;
- WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+ lockdep_assert_held_once(&dev_priv->binding_mutex);
list_for_each_entry_safe(entry, next, list, cotable_head)
WARN_ON(vmw_view_destroy(&entry->res));
@@ -459,7 +460,7 @@ void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
{
struct vmw_view *entry, *next;
- WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+ lockdep_assert_held_once(&dev_priv->binding_mutex);
list_for_each_entry_safe(entry, next, list, srf_head)
WARN_ON(vmw_view_destroy(&entry->res));
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index f30e839f7bfd..e086565c1da6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -759,17 +759,21 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
struct vmw_stdu_dirty ddirty;
int ret;
bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
+ DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
/*
* VMs without 3D support don't have the surface DMA command and
* we'll be using a CPU blit, and the framebuffer should be moved out
* of VRAM.
*/
- ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
- false, cpu_blit);
+ ret = vmw_validation_add_bo(&val_ctx, buf, false, cpu_blit);
if (ret)
return ret;
+ ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
+ if (ret)
+ goto out_unref;
+
ddirty.transfer = (to_surface) ? SVGA3D_WRITE_HOST_VRAM :
SVGA3D_READ_HOST_VRAM;
ddirty.left = ddirty.top = S32_MAX;
@@ -796,9 +800,13 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, vclips,
0, 0, num_clips, increment, &ddirty.base);
- vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
- user_fence_rep);
+ vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL,
+ user_fence_rep);
+ return ret;
+
+out_unref:
+ vmw_validation_unref_lists(&val_ctx);
return ret;
}
@@ -924,16 +932,20 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer_surface *vfbs =
container_of(framebuffer, typeof(*vfbs), base);
struct vmw_stdu_dirty sdirty;
- struct vmw_validation_ctx ctx;
+ DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
int ret;
if (!srf)
srf = &vfbs->surface->res;
- ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
+ ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
if (ret)
return ret;
+ ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
+ if (ret)
+ goto out_unref;
+
if (vfbs->is_bo_proxy) {
ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
if (ret)
@@ -954,8 +966,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
dest_x, dest_y, num_clips, inc,
&sdirty.base);
out_finish:
- vmw_kms_helper_resource_finish(&ctx, out_fence);
+ vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
+ NULL);
+
+ return ret;
+out_unref:
+ vmw_validation_unref_lists(&val_ctx);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 80a01cd4c051..ef09f7edf931 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -614,7 +614,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
*/
INIT_LIST_HEAD(&srf->view_list);
- vmw_resource_activate(res, vmw_hw_surface_destroy);
+ res->hw_destroy = vmw_hw_surface_destroy;
return ret;
}
@@ -731,7 +731,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
if (unlikely(vmw_user_surface_size == 0))
vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
- 128;
+ VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
num_sizes = 0;
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
@@ -744,7 +744,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
num_sizes == 0)
return -EINVAL;
- size = vmw_user_surface_size + 128 +
+ size = vmw_user_surface_size +
ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
@@ -886,7 +886,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- rep->sid = user_srf->prime.base.hash.key;
+ rep->sid = user_srf->prime.base.handle;
vmw_resource_unreference(&res);
ttm_read_unlock(&dev_priv->reservation_sem);
@@ -1024,7 +1024,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0)) {
DRM_ERROR("copy_to_user failed %p %u\n",
user_sizes, srf->num_sizes);
- ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
+ ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE);
ret = -EFAULT;
}
@@ -1613,9 +1613,9 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
if (unlikely(vmw_user_surface_size == 0))
vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
- 128;
+ VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
- size = vmw_user_surface_size + 128;
+ size = vmw_user_surface_size;
/* Define a surface based on the parameters. */
ret = vmw_surface_gb_priv_define(dev,
@@ -1687,7 +1687,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
goto out_unlock;
}
- rep->handle = user_srf->prime.base.hash.key;
+ rep->handle = user_srf->prime.base.handle;
rep->backup_size = res->backup_size;
if (res->backup) {
rep->buffer_map_handle =
@@ -1749,7 +1749,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a GB surface "
"backup buffer.\n");
- (void) ttm_ref_object_base_unref(tfile, base->hash.key,
+ (void) ttm_ref_object_base_unref(tfile, base->handle,
TTM_REF_USAGE);
goto out_bad_resource;
}
@@ -1763,7 +1763,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
rep->creq.base.array_size = srf->array_size;
rep->creq.base.buffer_handle = backup_handle;
rep->creq.base.base_size = srf->base_size;
- rep->crep.handle = user_srf->prime.base.hash.key;
+ rep->crep.handle = user_srf->prime.base.handle;
rep->crep.backup_size = srf->res.backup_size;
rep->crep.buffer_handle = backup_handle;
rep->crep.buffer_map_handle =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
new file mode 100644
index 000000000000..184025fa938e
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -0,0 +1,770 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#include <linux/slab.h>
+#include "vmwgfx_validation.h"
+#include "vmwgfx_drv.h"
+
+/**
+ * struct vmw_validation_bo_node - Buffer object validation metadata.
+ * @base: Metadata used for TTM reservation- and validation.
+ * @hash: A hash entry used for the duplicate detection hash table.
+ * @as_mob: Validate as mob.
+ * @cpu_blit: Validate for cpu blit access.
+ *
+ * Bit fields are used since these structures are allocated and freed in
+ * large numbers and space conservation is desired.
+ */
+struct vmw_validation_bo_node {
+ struct ttm_validate_buffer base;
+ struct drm_hash_item hash;
+ u32 as_mob : 1;
+ u32 cpu_blit : 1;
+};
+
+/**
+ * struct vmw_validation_res_node - Resource validation metadata.
+ * @head: List head for the resource validation list.
+ * @hash: A hash entry used for the duplicate detection hash table.
+ * @res: Reference counted resource pointer.
+ * @new_backup: Non ref-counted pointer to new backup buffer to be assigned
+ * to a resource.
+ * @new_backup_offset: Offset into the new backup mob for resources that can
+ * share MOBs.
+ * @no_buffer_needed: The kernel does not need to allocate a MOB during
+ * validation; the command stream provides a MOB bind operation.
+ * @switching_backup: The validation process is switching backup MOB.
+ * @first_usage: True iff the resource has been seen only once in the current
+ * validation batch.
+ * @reserved: Whether the resource is currently reserved by this process.
+ * @private: Optional additional memory for caller-private data.
+ *
+ * Bit fields are used since these structures are allocated and freed in
+ * large numbers and space conservation is desired.
+ */
+struct vmw_validation_res_node {
+ struct list_head head;
+ struct drm_hash_item hash;
+ struct vmw_resource *res;
+ struct vmw_buffer_object *new_backup;
+ unsigned long new_backup_offset;
+ u32 no_buffer_needed : 1;
+ u32 switching_backup : 1;
+ u32 first_usage : 1;
+ u32 reserved : 1;
+ unsigned long private[0];
+};
+
+/**
+ * vmw_validation_mem_alloc - Allocate kernel memory from the validation
+ * context based allocator
+ * @ctx: The validation context
+ * @size: The number of bytes to allocate.
+ *
+ * The memory allocated may not exceed PAGE_SIZE, and the returned
+ * address is aligned to sizeof(long). All memory allocated this way is
+ * reclaimed after validation when calling any of the exported functions:
+ * vmw_validation_unref_lists()
+ * vmw_validation_revert()
+ * vmw_validation_done()
+ *
+ * Return: Pointer to the allocated memory on success. NULL on failure.
+ */
+void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
+ unsigned int size)
+{
+ void *addr;
+
+ size = vmw_validation_align(size);
+ if (size > PAGE_SIZE)
+ return NULL;
+
+ if (ctx->mem_size_left < size) {
+ struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+
+ if (!page)
+ return NULL;
+
+ list_add_tail(&page->lru, &ctx->page_list);
+ ctx->page_address = page_address(page);
+ ctx->mem_size_left = PAGE_SIZE;
+ }
+
+ addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
+ ctx->mem_size_left -= size;
+
+ return addr;
+}
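A rough usage sketch of the allocator above (not part of the patch, with a hypothetical payload struct): node metadata is carved out of pages owned by the context, so there is no per-allocation free; everything is reclaimed in one pass by vmw_validation_mem_free().

struct example_priv {		/* hypothetical caller-private payload */
	u32 cmd_offset;
};

struct example_priv *priv = vmw_validation_mem_alloc(ctx, sizeof(*priv));

if (!priv)
	return -ENOMEM;		/* allocating a fresh page failed */
priv->cmd_offset = 0;
/* No kfree(): the backing page is released by vmw_validation_mem_free(). */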
+
+/**
+ * vmw_validation_mem_free - Free all memory allocated using
+ * vmw_validation_mem_alloc()
+ * @ctx: The validation context
+ *
+ * All memory previously allocated for this context using
+ * vmw_validation_mem_alloc() is freed.
+ */
+static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
+{
+ struct page *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
+ list_del_init(&entry->lru);
+ __free_page(entry);
+ }
+
+ ctx->mem_size_left = 0;
+}
+
+/**
+ * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the
+ * validation context's lists.
+ * @ctx: The validation context to search.
+ * @vbo: The buffer object to search for.
+ *
+ * Return: Pointer to the struct vmw_validation_bo_node referencing the
+ * duplicate, or NULL if none found.
+ */
+static struct vmw_validation_bo_node *
+vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
+ struct vmw_buffer_object *vbo)
+{
+ struct vmw_validation_bo_node *bo_node = NULL;
+
+ if (!ctx->merge_dups)
+ return NULL;
+
+ if (ctx->ht) {
+ struct drm_hash_item *hash;
+
+ if (!drm_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
+ bo_node = container_of(hash, typeof(*bo_node), hash);
+ } else {
+ struct vmw_validation_bo_node *entry;
+
+ list_for_each_entry(entry, &ctx->bo_list, base.head) {
+ if (entry->base.bo == &vbo->base) {
+ bo_node = entry;
+ break;
+ }
+ }
+ }
+
+ return bo_node;
+}
+
+/**
+ * vmw_validation_find_res_dup - Find a duplicate resource entry in the
+ * validation context's lists.
+ * @ctx: The validation context to search.
+ * @res: The resource to search for.
+ *
+ * Return: Pointer to the struct vmw_validation_res_node referencing the
+ * duplicate, or NULL if none found.
+ */
+static struct vmw_validation_res_node *
+vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
+ struct vmw_resource *res)
+{
+ struct vmw_validation_res_node *res_node = NULL;
+
+ if (!ctx->merge_dups)
+ return NULL;
+
+ if (ctx->ht) {
+ struct drm_hash_item *hash;
+
+ if (!drm_ht_find_item(ctx->ht, (unsigned long) res, &hash))
+ res_node = container_of(hash, typeof(*res_node), hash);
+ } else {
+ struct vmw_validation_res_node *entry;
+
+ list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
+ if (entry->res == res) {
+ res_node = entry;
+ goto out;
+ }
+ }
+
+ list_for_each_entry(entry, &ctx->resource_list, head) {
+ if (entry->res == res) {
+ res_node = entry;
+ break;
+ }
+ }
+
+ }
+out:
+ return res_node;
+}
+
+/**
+ * vmw_validation_add_bo - Add a buffer object to the validation context.
+ * @ctx: The validation context.
+ * @vbo: The buffer object.
+ * @as_mob: Validate as mob, otherwise suitable for GMR operations.
+ * @cpu_blit: Validate in a page-mappable location.
+ *
+ * Return: Zero on success, negative error code otherwise.
+ */
+int vmw_validation_add_bo(struct vmw_validation_context *ctx,
+ struct vmw_buffer_object *vbo,
+ bool as_mob,
+ bool cpu_blit)
+{
+ struct vmw_validation_bo_node *bo_node;
+
+ bo_node = vmw_validation_find_bo_dup(ctx, vbo);
+ if (bo_node) {
+ if (bo_node->as_mob != as_mob ||
+ bo_node->cpu_blit != cpu_blit) {
+ DRM_ERROR("Inconsistent buffer usage.\n");
+ return -EINVAL;
+ }
+ } else {
+ struct ttm_validate_buffer *val_buf;
+ int ret;
+
+ bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
+ if (!bo_node)
+ return -ENOMEM;
+
+ if (ctx->ht) {
+ bo_node->hash.key = (unsigned long) vbo;
+ ret = drm_ht_insert_item(ctx->ht, &bo_node->hash);
+ if (ret) {
+ DRM_ERROR("Failed to initialize a buffer "
+ "validation entry.\n");
+ return ret;
+ }
+ }
+ val_buf = &bo_node->base;
+ val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
+ if (!val_buf->bo)
+ return -ESRCH;
+ val_buf->shared = false;
+ list_add_tail(&val_buf->head, &ctx->bo_list);
+ bo_node->as_mob = as_mob;
+ bo_node->cpu_blit = cpu_blit;
+ }
+
+ return 0;
+}
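A minimal buffer-object-only flow built on the function above, essentially what __vmw_execbuf_release_pinned_bo() in the execbuf hunk does, might look like the sketch below; error unwinding is abbreviated and @vbo and @fence are assumed to exist in the caller:

DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);	/* no duplicate-detection hash table */
int ret;

ret = vmw_validation_add_bo(&val_ctx, vbo, false, false);
if (ret)
	goto out_unref;

ret = vmw_validation_bo_reserve(&val_ctx, false);
if (ret)
	goto out_unref;

ret = vmw_validation_bo_validate(&val_ctx, false);
if (ret)
	goto out_backoff;

/* ... submit commands and obtain @fence ... */
vmw_validation_bo_fence(&val_ctx, fence);
vmw_validation_unref_lists(&val_ctx);
return 0;

out_backoff:
vmw_validation_bo_backoff(&val_ctx);
out_unref:
vmw_validation_unref_lists(&val_ctx);
return ret;

With duplicate merging enabled on the context, re-adding the same buffer with the same as_mob/cpu_blit flags merges into the existing node, while inconsistent flags fail with -EINVAL, as the duplicate check above shows.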
+
+/**
+ * vmw_validation_add_resource - Add a resource to the validation context.
+ * @ctx: The validation context.
+ * @res: The resource.
+ * @priv_size: Size of private, additional metadata.
+ * @p_node: If non-NULL, filled in with the address of the additional metadata.
+ * @first_usage: If non-NULL, set to whether this is the first time this
+ * resource is seen in the current validation batch.
+ *
+ * Return: Zero on success, negative error code otherwise.
+ */
+int vmw_validation_add_resource(struct vmw_validation_context *ctx,
+ struct vmw_resource *res,
+ size_t priv_size,
+ void **p_node,
+ bool *first_usage)
+{
+ struct vmw_validation_res_node *node;
+ int ret;
+
+ node = vmw_validation_find_res_dup(ctx, res);
+ if (node) {
+ node->first_usage = 0;
+ goto out_fill;
+ }
+
+ node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
+ if (!node) {
+ DRM_ERROR("Failed to allocate a resource validation "
+ "entry.\n");
+ return -ENOMEM;
+ }
+
+ if (ctx->ht) {
+ node->hash.key = (unsigned long) res;
+ ret = drm_ht_insert_item(ctx->ht, &node->hash);
+ if (ret) {
+ DRM_ERROR("Failed to initialize a resource validation "
+ "entry.\n");
+ return ret;
+ }
+ }
+ node->res = vmw_resource_reference_unless_doomed(res);
+ if (!node->res)
+ return -ESRCH;
+
+ node->first_usage = 1;
+ if (!res->dev_priv->has_mob) {
+ list_add_tail(&node->head, &ctx->resource_list);
+ } else {
+ switch (vmw_res_type(res)) {
+ case vmw_res_context:
+ case vmw_res_dx_context:
+ list_add(&node->head, &ctx->resource_ctx_list);
+ break;
+ case vmw_res_cotable:
+ list_add_tail(&node->head, &ctx->resource_ctx_list);
+ break;
+ default:
+ list_add_tail(&node->head, &ctx->resource_list);
+ break;
+ }
+ }
+
+out_fill:
+ if (first_usage)
+ *first_usage = node->first_usage;
+ if (p_node)
+ *p_node = &node->private;
+
+ return 0;
+}
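Callers that need per-resource bookkeeping can reserve extra space directly in the node. The names in the sketch below are illustrative only, but the calls are the ones added by this patch (vmw_validation_res_switch_backup() is defined just below):

struct example_res_priv {		/* hypothetical caller-private data */
	unsigned long backup_offset;
};

struct example_res_priv *priv;
bool first_usage;
int ret;

ret = vmw_validation_add_resource(ctx, res, sizeof(*priv),
				  (void **)&priv, &first_usage);
if (ret)
	return ret;

if (first_usage)
	priv->backup_offset = 0;

/* Later, if the command stream rebinds the resource to a new backup MOB: */
vmw_validation_res_switch_backup(ctx, priv, new_vbo, priv->backup_offset);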
+
+/**
+ * vmw_validation_res_switch_backup - Register a backup MOB switch during
+ * validation.
+ * @ctx: The validation context.
+ * @val_private: The additional meta-data pointer returned when the
+ * resource was registered with the validation context. Used to identify
+ * the resource.
+ * @vbo: The new backup buffer object MOB. This buffer object needs to have
+ * already been registered with the validation context.
+ * @backup_offset: Offset into the new backup MOB.
+ */
+void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
+ void *val_private,
+ struct vmw_buffer_object *vbo,
+ unsigned long backup_offset)
+{
+ struct vmw_validation_res_node *val;
+
+ val = container_of(val_private, typeof(*val), private);
+
+ val->switching_backup = 1;
+ if (val->first_usage)
+ val->no_buffer_needed = 1;
+
+ val->new_backup = vbo;
+ val->new_backup_offset = backup_offset;
+}
+
+/**
+ * vmw_validation_res_reserve - Reserve all resources registered with this
+ * validation context.
+ * @ctx: The validation context.
+ * @intr: Use interruptible waits when possible.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
+ * code on failure.
+ */
+int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
+ bool intr)
+{
+ struct vmw_validation_res_node *val;
+ int ret = 0;
+
+ list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
+
+ list_for_each_entry(val, &ctx->resource_list, head) {
+ struct vmw_resource *res = val->res;
+
+ ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
+ if (ret)
+ goto out_unreserve;
+
+ val->reserved = 1;
+ if (res->backup) {
+ struct vmw_buffer_object *vbo = res->backup;
+
+ ret = vmw_validation_add_bo
+ (ctx, vbo, vmw_resource_needs_backup(res),
+ false);
+ if (ret)
+ goto out_unreserve;
+ }
+ }
+
+ return 0;
+
+out_unreserve:
+ vmw_validation_res_unreserve(ctx, true);
+ return ret;
+}
+
+/**
+ * vmw_validation_res_unreserve - Unreserve all reserved resources
+ * registered with this validation context.
+ * @ctx: The validation context.
+ * @backoff: Whether this is a backoff- or a commit-type operation. This
+ * is used to determine whether to switch backup MOBs or not.
+ */
+void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
+ bool backoff)
+{
+ struct vmw_validation_res_node *val;
+
+ list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
+
+ list_for_each_entry(val, &ctx->resource_list, head) {
+ if (val->reserved)
+ vmw_resource_unreserve(val->res,
+ !backoff &&
+ val->switching_backup,
+ val->new_backup,
+ val->new_backup_offset);
+ }
+}
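The two functions above bracket command submission; note that vmw_validation_res_reserve() already backs off internally on failure, so callers only pair a successful reserve with an unreserve. A compressed sketch of the intended pairing:

ret = vmw_validation_res_reserve(ctx, true);
if (ret)
	return ret;		/* resources already unreserved with backoff */

/* ... validate buffers and resources, submit, create @fence ... */

if (ret)
	vmw_validation_res_unreserve(ctx, true);  /* backoff: discard backup switch */
else
	vmw_validation_res_unreserve(ctx, false); /* commit any new backup MOB */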
+
+/**
+ * vmw_validation_bo_validate_single - Validate a single buffer object.
+ * @bo: The TTM buffer object base.
+ * @interruptible: Whether to perform waits interruptible if possible.
+ * @validate_as_mob: Whether to validate in MOB memory.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
+ * code on failure.
+ */
+int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool validate_as_mob)
+{
+ struct vmw_buffer_object *vbo =
+ container_of(bo, struct vmw_buffer_object, base);
+ struct ttm_operation_ctx ctx = {
+ .interruptible = interruptible,
+ .no_wait_gpu = false
+ };
+ int ret;
+
+ if (vbo->pin_count > 0)
+ return 0;
+
+ if (validate_as_mob)
+ return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
+
+ /*
+ * Put BO in VRAM if there is space, otherwise as a GMR.
+ * If there is no space in VRAM and GMR ids are all used up,
+ * start evicting GMRs to make room. If the DMA buffer can't be
+ * used as a GMR, this will return -ENOMEM.
+ */
+
+ ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
+ if (ret == 0 || ret == -ERESTARTSYS)
+ return ret;
+
+ /*
+ * If that failed, try VRAM again, this time evicting
+ * previous contents.
+ */
+
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
+ return ret;
+}
+
+/**
+ * vmw_validation_bo_validate - Validate all buffer objects registered with
+ * the validation context.
+ * @ctx: The validation context.
+ * @intr: Whether to perform waits interruptible if possible.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted,
+ * negative error code on failure.
+ */
+int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
+{
+ struct vmw_validation_bo_node *entry;
+ int ret;
+
+ list_for_each_entry(entry, &ctx->bo_list, base.head) {
+ if (entry->cpu_blit) {
+ struct ttm_operation_ctx ttm_ctx = {
+ .interruptible = intr,
+ .no_wait_gpu = false
+ };
+
+ ret = ttm_bo_validate(entry->base.bo,
+ &vmw_nonfixed_placement, &ttm_ctx);
+ } else {
+ ret = vmw_validation_bo_validate_single
+ (entry->base.bo, intr, entry->as_mob);
+ }
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * vmw_validation_res_validate - Validate all resources registered with the
+ * validation context.
+ * @ctx: The validation context.
+ * @intr: Whether to perform waits interruptibly if possible.
+ *
+ * Before this function is called, all resource backup buffers must have
+ * been validated.
+ *
+ * Return: Zero on success, -ERESTARTSYS if interrupted,
+ * negative error code on failure.
+ */
+int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
+{
+ struct vmw_validation_res_node *val;
+ int ret;
+
+ list_for_each_entry(val, &ctx->resource_list, head) {
+ struct vmw_resource *res = val->res;
+ struct vmw_buffer_object *backup = res->backup;
+
+ ret = vmw_resource_validate(res, intr);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Failed to validate resource.\n");
+ return ret;
+ }
+
+ /* Check if the resource switched backup buffer */
+ if (backup && res->backup && (backup != res->backup)) {
+ struct vmw_buffer_object *vbo = res->backup;
+
+ ret = vmw_validation_add_bo
+ (ctx, vbo, vmw_resource_needs_backup(res),
+ false);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/**
+ * vmw_validation_drop_ht - Reset the hash table used for duplicate finding
+ * and unregister it from this validation context.
+ * @ctx: The validation context.
+ *
+ * The hash table used for duplicate finding is an expensive resource and
+ * may be protected by mutexes that may cause deadlocks during resource
+ * unreferencing if held. After resource- and buffer object registration,
+ * there is no longer any use for this hash table, so allow freeing it
+ * either to shorten any mutex locking time, or before resources and
+ * buffer objects are freed during validation context cleanup.
+ */
+void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
+{
+ struct vmw_validation_bo_node *entry;
+ struct vmw_validation_res_node *val;
+
+ if (!ctx->ht)
+ return;
+
+ list_for_each_entry(entry, &ctx->bo_list, base.head)
+ (void) drm_ht_remove_item(ctx->ht, &entry->hash);
+
+ list_for_each_entry(val, &ctx->resource_list, head)
+ (void) drm_ht_remove_item(ctx->ht, &val->hash);
+
+ list_for_each_entry(val, &ctx->resource_ctx_list, head)
+ (void) drm_ht_remove_item(ctx->ht, &val->hash);
+
+ ctx->ht = NULL;
+}
+
+/**
+ * vmw_validation_unref_lists - Unregister previously registered buffer
+ * objects and resources.
+ * @ctx: The validation context.
+ *
+ * Note that this function may cause buffer object- and resource destructors
+ * to be invoked.
+ */
+void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
+{
+ struct vmw_validation_bo_node *entry;
+ struct vmw_validation_res_node *val;
+
+ list_for_each_entry(entry, &ctx->bo_list, base.head)
+ ttm_bo_unref(&entry->base.bo);
+
+ list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
+ list_for_each_entry(val, &ctx->resource_list, head)
+ vmw_resource_unreference(&val->res);
+
+ /*
+ * No need to detach each list entry since they are all freed with
+ * vmw_validation_mem_free. Just make them inaccessible.
+ */
+ INIT_LIST_HEAD(&ctx->bo_list);
+ INIT_LIST_HEAD(&ctx->resource_list);
+
+ vmw_validation_mem_free(ctx);
+}
+
+/**
+ * vmw_validation_prepare - Prepare a validation context for command
+ * submission.
+ * @ctx: The validation context.
+ * @mutex: The mutex used to protect resource reservation.
+ * @intr: Whether to perform waits interruptibly if possible.
+ *
+ * Note that the single reservation mutex @mutex is an unfortunate
+ * construct. Ideally resource reservation should be moved to per-resource
+ * ww_mutexes.
+ * If this function doesn't return zero to indicate success, all resources
+ * are left unreserved but still referenced.
+ * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
+ * on error.
+ */
+int vmw_validation_prepare(struct vmw_validation_context *ctx,
+ struct mutex *mutex,
+ bool intr)
+{
+ int ret = 0;
+
+ if (mutex) {
+ if (intr)
+ ret = mutex_lock_interruptible(mutex);
+ else
+ mutex_lock(mutex);
+ if (ret)
+ return -ERESTARTSYS;
+ }
+
+ ctx->res_mutex = mutex;
+ ret = vmw_validation_res_reserve(ctx, intr);
+ if (ret)
+ goto out_no_res_reserve;
+
+ ret = vmw_validation_bo_reserve(ctx, intr);
+ if (ret)
+ goto out_no_bo_reserve;
+
+ ret = vmw_validation_bo_validate(ctx, intr);
+ if (ret)
+ goto out_no_validate;
+
+ ret = vmw_validation_res_validate(ctx, intr);
+ if (ret)
+ goto out_no_validate;
+
+ return 0;
+
+out_no_validate:
+ vmw_validation_bo_backoff(ctx);
+out_no_bo_reserve:
+ vmw_validation_res_unreserve(ctx, true);
+out_no_res_reserve:
+ if (mutex)
+ mutex_unlock(mutex);
+
+ return ret;
+}
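+
+/*
+ * Example usage (an illustrative sketch only, not taken from the driver):
+ * a typical caller pairs vmw_validation_prepare() with vmw_validation_done()
+ * on success, or with vmw_validation_revert() if command submission fails
+ * after a successful prepare. The identifiers vbo, dev_priv->cmdbuf_mutex
+ * and fence below are placeholders assumed for the example.
+ *
+ *	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
+ *	int ret;
+ *
+ *	ret = vmw_validation_add_bo(&val_ctx, vbo, false, false);
+ *	if (ret)
+ *		goto out_unref;
+ *
+ *	ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
+ *	if (ret)
+ *		goto out_unref;
+ *
+ *	(build and submit the command buffer, obtaining fence)
+ *
+ *	vmw_validation_done(&val_ctx, fence);
+ *	return 0;
+ *
+ * out_unref:
+ *	vmw_validation_unref_lists(&val_ctx);
+ *	return ret;
+ */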
+
+/**
+ * vmw_validation_revert - Revert validation actions if command submission
+ * failed.
+ *
+ * @ctx: The validation context.
+ *
+ * The caller still needs to unref resources after a call to this function.
+ */
+void vmw_validation_revert(struct vmw_validation_context *ctx)
+{
+ vmw_validation_bo_backoff(ctx);
+ vmw_validation_res_unreserve(ctx, true);
+ if (ctx->res_mutex)
+ mutex_unlock(ctx->res_mutex);
+ vmw_validation_unref_lists(ctx);
+}
+
+/**
+ * vmw_validation_done - Commit validation actions after command submission
+ * success.
+ * @ctx: The validation context.
+ * @fence: Fence with which to fence all buffer objects taking part in the
+ * command submission.
+ *
+ * The caller does NOT need to unref resources after a call to this function.
+ */
+void vmw_validation_done(struct vmw_validation_context *ctx,
+ struct vmw_fence_obj *fence)
+{
+ vmw_validation_bo_fence(ctx, fence);
+ vmw_validation_res_unreserve(ctx, false);
+ if (ctx->res_mutex)
+ mutex_unlock(ctx->res_mutex);
+ vmw_validation_unref_lists(ctx);
+}
+
+/**
+ * vmw_validation_preload_bo - Preload the validation memory allocator for a
+ * call to vmw_validation_add_bo().
+ * @ctx: Pointer to the validation context.
+ *
+ * Iff this function returns successfully, the next call to
+ * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal
+ * but voids the guarantee.
+ *
+ * Returns: Zero if successful, %-ENOMEM otherwise.
+ */
+int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
+{
+ unsigned int size = sizeof(struct vmw_validation_bo_node);
+
+ if (!vmw_validation_mem_alloc(ctx, size))
+ return -ENOMEM;
+
+ ctx->mem_size_left += size;
+ return 0;
+}
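+
+/*
+ * Example usage (an illustrative sketch only): preloading lets the next
+ * vmw_validation_add_bo() call be made from a context in which sleeping
+ * must be avoided. The lock and lookup helper below are placeholders
+ * assumed for the example.
+ *
+ *	ret = vmw_validation_preload_bo(ctx);
+ *	if (ret)
+ *		return ret;
+ *	spin_lock(&lock);
+ *	vbo = my_bo_lookup_locked(key);
+ *	ret = vmw_validation_add_bo(ctx, vbo, false, false);
+ *	spin_unlock(&lock);
+ */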
+
+/**
+ * vmw_validation_preload_res - Preload the validation memory allocator for a
+ * call to vmw_validation_add_res().
+ * @ctx: Pointer to the validation context.
+ * @size: Size of the validation node extra data. See below.
+ *
+ * Iff this function returns successfully, the next call to
+ * vmw_validation_add_res() with the same or smaller @size is guaranteed not to
+ * sleep. An error is not fatal but voids the guarantee.
+ *
+ * Returns: Zero if successful, %-ENOMEM otherwise.
+ */
+int vmw_validation_preload_res(struct vmw_validation_context *ctx,
+ unsigned int size)
+{
+ size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
+ size) +
+ vmw_validation_align(sizeof(struct vmw_validation_bo_node));
+ if (!vmw_validation_mem_alloc(ctx, size))
+ return -ENOMEM;
+
+ ctx->mem_size_left += size;
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
new file mode 100644
index 000000000000..b57e3292c386
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef _VMWGFX_VALIDATION_H_
+#define _VMWGFX_VALIDATION_H_
+
+#include <drm/drm_hashtab.h>
+#include <linux/list.h>
+#include <linux/ww_mutex.h>
+#include <drm/ttm/ttm_execbuf_util.h>
+
+/**
+ * struct vmw_validation_context - Per command submission validation context
+ * @ht: Hash table used to find resource- or buffer object duplicates
+ * @resource_list: List head for resource validation metadata
+ * @resource_ctx_list: List head for resource validation metadata for
+ * resources that need to be validated before those in @resource_list
+ * @bo_list: List head for buffer objects
+ * @page_list: List of pages used by the memory allocator
+ * @ticket: Ticket used for ww mutex locking
+ * @res_mutex: Pointer to mutex used for resource reserving
+ * @merge_dups: Whether to merge metadata for duplicate resources or
+ * buffer objects
+ * @mem_size_left: Free memory left in the last page in @page_list
+ * @page_address: Kernel virtual address of the last page in @page_list
+ */
+struct vmw_validation_context {
+ struct drm_open_hash *ht;
+ struct list_head resource_list;
+ struct list_head resource_ctx_list;
+ struct list_head bo_list;
+ struct list_head page_list;
+ struct ww_acquire_ctx ticket;
+ struct mutex *res_mutex;
+ unsigned int merge_dups;
+ unsigned int mem_size_left;
+ u8 *page_address;
+};
+
+struct vmw_buffer_object;
+struct vmw_resource;
+struct vmw_fence_obj;
+
+#if 0
+/**
+ * DECLARE_VAL_CONTEXT - Declare a validation context with initialization
+ * @_name: The name of the variable
+ * @_ht: The hash table used to find dups or NULL if none
+ * @_merge_dups: Whether to merge duplicate buffer object- or resource
+ * entries. If set to true, ideally a hash table pointer should be supplied
+ * as well unless the number of resources and buffer objects per validation
+ * is known to be very small
+ */
+#endif
+#define DECLARE_VAL_CONTEXT(_name, _ht, _merge_dups) \
+ struct vmw_validation_context _name = \
+ { .ht = _ht, \
+ .resource_list = LIST_HEAD_INIT((_name).resource_list), \
+ .resource_ctx_list = LIST_HEAD_INIT((_name).resource_ctx_list), \
+ .bo_list = LIST_HEAD_INIT((_name).bo_list), \
+ .page_list = LIST_HEAD_INIT((_name).page_list), \
+ .res_mutex = NULL, \
+ .merge_dups = _merge_dups, \
+ .mem_size_left = 0, \
+ }
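+
+/*
+ * Example usage (an illustrative sketch only): declare a context that merges
+ * duplicate entries using a caller-provided hash table; the hash table member
+ * sw_context->res_ht below is assumed for the example.
+ *
+ *	DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
+ *	...
+ *	vmw_validation_drop_ht(&val_ctx);
+ *	vmw_validation_unref_lists(&val_ctx);
+ */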
+
+/**
+ * vmw_validation_has_bos - return whether the validation context has
+ * any buffer objects registered.
+ *
+ * @ctx: The validation context
+ * Returns: Whether any buffer objects are registered
+ */
+static inline bool
+vmw_validation_has_bos(struct vmw_validation_context *ctx)
+{
+ return !list_empty(&ctx->bo_list);
+}
+
+/**
+ * vmw_validation_set_ht - Register a hash table for duplicate finding
+ * @ctx: The validation context
+ * @ht: Pointer to a hash table to use for duplicate finding
+ *
+ * This function is intended to be used if the hash table wasn't
+ * available at validation context declaration time.
+ */
+static inline void vmw_validation_set_ht(struct vmw_validation_context *ctx,
+ struct drm_open_hash *ht)
+{
+ ctx->ht = ht;
+}
+
+/**
+ * vmw_validation_bo_reserve - Reserve buffer objects registered with a
+ * validation context
+ * @ctx: The validation context
+ * @intr: Perform waits interruptible
+ *
+ * Return: Zero on success, -ERESTARTSYS when interrupted, negative error
+ * code on failure
+ */
+static inline int
+vmw_validation_bo_reserve(struct vmw_validation_context *ctx,
+ bool intr)
+{
+ return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
+ NULL);
+}
+
+/**
+ * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
+ * validation context
+ * @ctx: The validation context
+ *
+ * This function unreserves the buffer objects previously reserved using
+ * vmw_validation_bo_reserve. It's typically used as part of an error path.
+ */
+static inline void
+vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
+{
+ ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
+}
+
+/**
+ * vmw_validation_bo_fence - Unreserve and fence buffer objects registered
+ * with a validation context
+ * @ctx: The validation context
+ * @fence: The fence object with which to fence the buffer objects
+ *
+ * This function unreserves the buffer objects previously reserved using
+ * vmw_validation_bo_reserve, and fences them with a fence object.
+ */
+static inline void
+vmw_validation_bo_fence(struct vmw_validation_context *ctx,
+ struct vmw_fence_obj *fence)
+{
+ ttm_eu_fence_buffer_objects(&ctx->ticket, &ctx->bo_list,
+ (void *) fence);
+}
+
+/**
+ * vmw_validation_context_init - Initialize a validation context
+ * @ctx: Pointer to the validation context to initialize
+ *
+ * This function initializes a validation context with @merge_dups set
+ * to false.
+ */
+static inline void
+vmw_validation_context_init(struct vmw_validation_context *ctx)
+{
+ memset(ctx, 0, sizeof(*ctx));
+ INIT_LIST_HEAD(&ctx->resource_list);
+ INIT_LIST_HEAD(&ctx->resource_ctx_list);
+ INIT_LIST_HEAD(&ctx->bo_list);
+}
+
+/**
+ * vmw_validation_align - Align a validation memory allocation
+ * @val: The size to be aligned
+ *
+ * Returns: @val aligned to the granularity used by the validation memory
+ * allocator.
+ */
+static inline unsigned int vmw_validation_align(unsigned int val)
+{
+ return ALIGN(val, sizeof(long));
+}
+
+int vmw_validation_add_bo(struct vmw_validation_context *ctx,
+ struct vmw_buffer_object *vbo,
+ bool as_mob, bool cpu_blit);
+int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool validate_as_mob);
+int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr);
+void vmw_validation_unref_lists(struct vmw_validation_context *ctx);
+int vmw_validation_add_resource(struct vmw_validation_context *ctx,
+ struct vmw_resource *res,
+ size_t priv_size,
+ void **p_node,
+ bool *first_usage);
+void vmw_validation_drop_ht(struct vmw_validation_context *ctx);
+int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
+ bool intr);
+void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
+ bool backoff);
+void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
+ void *val_private,
+ struct vmw_buffer_object *vbo,
+ unsigned long backup_offset);
+int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr);
+
+int vmw_validation_prepare(struct vmw_validation_context *ctx,
+ struct mutex *mutex, bool intr);
+void vmw_validation_revert(struct vmw_validation_context *ctx);
+void vmw_validation_done(struct vmw_validation_context *ctx,
+ struct vmw_fence_obj *fence);
+
+void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
+ unsigned int size);
+int vmw_validation_preload_bo(struct vmw_validation_context *ctx);
+int vmw_validation_preload_res(struct vmw_validation_context *ctx,
+ unsigned int size);
+#endif