author     Ben Widawsky <benjamin.widawsky@intel.com>  2014-05-01 20:13:26 -0700
committer  Ben Widawsky <benjamin.widawsky@intel.com>  2014-06-20 18:36:16 -0700
commit     4a989525473be8ec59d12539e5a338889b306530
tree       5de425b4b579b940b2d167672af9b5ab4e5e8f68
parent     7010548d94215db483feee750e599efb98cd8647
drm/i915/userptr: Mirror GPU addr at ioctl (HACK/POC)  [gpu_mirror]
This is needed for the proof of concept work that will allow mirrored GPU
addressing via the existing userptr interface. Part of the hack involves
passing the context ID to the ioctl in order to get a VM.

v2: This patch now breaks ABI, since userptr was merged.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
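For reference, a minimal sketch of how userspace might drive the reworked
ioctl. It assumes the post-patch uapi below, a context already created via
DRM_IOCTL_I915_GEM_CONTEXT_CREATE, a page-aligned allocation, and that
DRM_IOCTL_I915_GEM_USERPTR is wired up as for the merged userptr ioctl; the
helper name is hypothetical:

```c
#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Ask the kernel to wrap 'ptr' in a GEM object whose GPU virtual
 * address in ctx_id's VM equals the CPU pointer itself. */
static int gem_userptr_mirror(int fd, void *ptr, uint64_t size,
			      uint32_t ctx_id, uint32_t *handle)
{
	struct drm_i915_gem_userptr arg = {
		.user_ptr  = (uintptr_t)ptr,	/* must be page aligned */
		.user_size = size,		/* must be page aligned */
		.ctx_id    = ctx_id,		/* names the VM to reserve in */
		.flags     = I915_USERPTR_GPU_MIRROR,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
		return -errno;

	*handle = arg.handle;	/* GPU VA == arg.user_ptr on success */
	return 0;
}
```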
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c  | 134
-rw-r--r--  include/uapi/drm/i915_drm.h              |   7
2 files changed, 109 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 21ea92886a56..450b28ec05e8 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -220,10 +220,6 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
struct interval_tree_node *it;
int ret;
- ret = i915_mutex_lock_interruptible(mmu->dev);
- if (ret)
- return ret;
-
/* Make sure we drop the final active reference (and thereby
* remove the objects from the interval tree) before we do
* the check for overlapping objects.
@@ -253,7 +249,6 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
ret = 0;
}
spin_unlock(&mmu->lock);
- mutex_unlock(&mmu->dev->struct_mutex);
return ret;
}
@@ -283,19 +278,12 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
down_write(&obj->userptr.mm->mmap_sem);
- ret = i915_mutex_lock_interruptible(obj->base.dev);
- if (ret == 0) {
- mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
- if (!IS_ERR(mmu))
- mmu->count++; /* preemptive add to act as a refcount */
- else
- ret = PTR_ERR(mmu);
- mutex_unlock(&obj->base.dev->struct_mutex);
- }
+ mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
+ if (!IS_ERR(mmu))
+ mmu->count++; /* preemptive add to act as a refcount */
+ else
+ ret = PTR_ERR(mmu);
up_write(&obj->userptr.mm->mmap_sem);
- if (ret)
- return ret;
-
mn = kzalloc(sizeof(*mn), GFP_KERNEL);
if (mn == NULL) {
ret = -ENOMEM;
@@ -317,10 +305,8 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
free_mn:
kfree(mn);
destroy_mmu:
- mutex_lock(&obj->base.dev->struct_mutex);
if (--mmu->count == 0)
__i915_mmu_notifier_destroy(mmu);
- mutex_unlock(&obj->base.dev->struct_mutex);
return ret;
}
@@ -591,10 +577,66 @@ i915_gem_userptr_release(struct drm_i915_gem_object *obj)
static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
+ int ret;
+
if (obj->userptr.mn)
return 0;
- return i915_gem_userptr_init__mmu_notifier(obj, 0);
+ ret = i915_mutex_lock_interruptible(obj->base.dev);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_userptr_init__mmu_notifier(obj, 0);
+
+ mutex_unlock(&obj->base.dev->struct_mutex);
+
+ return ret;
+}
+
+/* Carve out the address space for later use */
+static int i915_gem_userptr_reserve_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ uint64_t offset,
+ uint64_t size)
+{
+ struct i915_vma *vma;
+ int ret;
+
+ vma = i915_gem_obj_to_vma(obj, vm);
+ if (vma)
+ return -ENXIO;
+
+ vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ BUG_ON(!drm_mm_initialized(&vm->mm));
+
+ if (vma->uptr) {
+ DRM_INFO("Already had a userptr\n");
+ return 0;
+ }
+ if (vma->node.allocated) {
+ DRM_INFO("Node was previously allocated\n");
+ return -EBUSY;
+ }
+
+ vma->node.start = offset;
+ vma->node.size = size;
+ vma->node.color = 0;
+ ret = drm_mm_reserve_node(&vm->mm, &vma->node);
+ if (ret) {
+ /* There are two reasons this can fail.
+ * 1. The user is using a mix of relocs and userptr, and a reloc
+ * won.
+ * TODO: handle better.
+ */
+ return ret;
+ }
+
+ vma->uptr = 1;
+
+ return 0;
}
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
@@ -640,37 +682,62 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_gem_userptr *args = data;
struct drm_i915_gem_object *obj;
+ struct intel_context *ctx;
+ struct i915_address_space *vm;
int ret;
u32 handle;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+#define goto_err(__err) do { \
+ ret = (__err); \
+ goto out; \
+} while (0)
+
+ ctx = i915_gem_context_get(file_priv, args->ctx_id);
+ if (IS_ERR(ctx))
+ goto_err(PTR_ERR(ctx));
+
+ /* i915_gem_context_reference(ctx); */
+
if (args->flags & ~(I915_USERPTR_READ_ONLY |
+ I915_USERPTR_GPU_MIRROR |
I915_USERPTR_UNSYNCHRONIZED))
- return -EINVAL;
+ goto_err(-EINVAL);
if (offset_in_page(args->user_ptr | args->user_size))
- return -EINVAL;
-
- if (args->user_size > dev_priv->gtt.base.total)
- return -E2BIG;
+ goto_err(-EINVAL);
if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
(char __user *)(unsigned long)args->user_ptr, args->user_size))
- return -EFAULT;
+ goto_err(-EFAULT);
if (args->flags & I915_USERPTR_READ_ONLY) {
/* On almost all of the current hw, we cannot tell the GPU that a
* page is readonly, so this is just a placeholder in the uAPI.
*/
- return -ENODEV;
+ goto_err(-ENODEV);
+ }
+
+ vm = ctx->vm;
+ if (args->user_size > vm->total)
+ goto_err(-E2BIG);
+
+ if (args->flags & I915_USERPTR_GPU_MIRROR) {
+ if (!HAS_48B_PPGTT(dev))
+ goto_err(-ENODEV);
}
/* Allocate the new object */
obj = i915_gem_object_alloc(dev);
if (obj == NULL)
- return -ENOMEM;
+ goto_err(-ENOMEM);
+#undef goto_err
drm_gem_private_object_init(dev, &obj->base, args->user_size);
i915_gem_object_init(obj, &i915_gem_userptr_ops);
@@ -690,9 +757,16 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
if (ret == 0)
ret = drm_gem_handle_create(file, &obj->base, &handle);
+ if (ret == 0 && args->flags & I915_USERPTR_GPU_MIRROR) {
+ ret = i915_gem_userptr_reserve_vma(obj, vm, args->user_ptr, args->user_size);
+ if (ret)
+ DRM_DEBUG_DRIVER("Failed to reserve GPU mirror %d\n", ret);
+ }
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(&obj->base);
+ drm_gem_object_unreference(&obj->base);
+out:
+ mutex_unlock(&dev->struct_mutex);
if (ret)
return ret;
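The heart of the mirror is i915_gem_userptr_reserve_vma() above: rather than
letting the allocator pick a node, the VMA's drm_mm node is pinned so that it
starts at the CPU address, making the object's GPU virtual address equal to
the userspace pointer. A standalone sketch of that pattern against the drm_mm
API (the helper name is hypothetical):

```c
#include <linux/types.h>
#include <drm/drm_mm.h>

/* Reserve the fixed range [addr, addr + size) in a drm_mm address
 * space, as the patch does to pin GPU VA == CPU VA.  Returns -ENOSPC
 * if something else (e.g. a relocated object) already occupies any
 * part of the range. */
static int reserve_fixed_range(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 addr, u64 size)
{
	node->start = addr;
	node->size  = size;
	node->color = 0;
	return drm_mm_reserve_node(mm, node);
}
```

Note also that the earlier hunks hoist struct_mutex out of
i915_mmu_notifier_add() and i915_gem_userptr_init__mmu_notifier(): the ioctl
now takes the lock once around its whole body, which is why the final
unreference switches from drm_gem_object_unreference_unlocked() to the locked
variant.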
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 97dea09a48ba..7652f8301cda 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -1056,15 +1056,18 @@ struct drm_i915_reset_stats {
struct drm_i915_gem_userptr {
__u64 user_ptr;
__u64 user_size;
+ __u32 ctx_id;
__u32 flags;
-#define I915_USERPTR_READ_ONLY 0x1
-#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
+#define I915_USERPTR_READ_ONLY (1<<0)
+#define I915_USERPTR_GPU_MIRROR (1<<1)
+#define I915_USERPTR_UNSYNCHRONIZED (1<<31)
/**
* Returned handle for the object.
*
* Object handles are nonzero.
*/
__u32 handle;
+ __u32 pad;
};
#endif /* _UAPI_I915_DRM_H_ */
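Because the new ctx_id field lands before flags, every existing binary that
packed flags at offset 16 now misbehaves; that is the ABI break called out in
the commit message. The trailing pad keeps the struct size a multiple of 8
bytes so no implicit padding sneaks in. A compile-time check of the expected
post-patch layout (a sketch, assuming the definitions above and C11):

```c
#include <stddef.h>
#include <drm/i915_drm.h>

/* 8 (user_ptr) + 8 (user_size) + 4 (ctx_id) + 4 (flags)
 * + 4 (handle) + 4 (pad) = 32 bytes, no implicit padding. */
_Static_assert(offsetof(struct drm_i915_gem_userptr, flags) == 20,
	       "ctx_id must precede flags");
_Static_assert(sizeof(struct drm_i915_gem_userptr) == 32,
	       "unexpected drm_i915_gem_userptr size");
```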