author     Dave Airlie <airlied@redhat.com>   2013-12-02 16:31:49 +1000
committer  Dave Airlie <airlied@redhat.com>   2013-12-02 16:31:49 +1000
commit     55ef0bb10b00c1c513a309020d6894e54c464e2c (patch)
tree       b4a46340cf841e1f521fb8b6c40c6fbbdca3701a
parent     fc58ab77b876de8c05e87542bf789b360c3bb1ae (diff)
start ioctl hackings
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h    |  44
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fence.c  |   2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_gem.c    |  11
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c  | 238
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c    |   2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_object.c |  16
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c     | 117
-rw-r--r--  include/uapi/drm/virtgpu_drm.h          |  28
8 files changed, 277 insertions, 181 deletions
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 3d2a14bbe59b..9fe6119eeed7 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -173,6 +173,11 @@ int virtgpu_gem_init_object(struct drm_gem_object *obj);
void virtgpu_gem_free_object(struct drm_gem_object *gem_obj);
int virtgpu_gem_init(struct virtgpu_device *vgdev);
void virtgpu_gem_fini(struct virtgpu_device *vgdev);
+int virtgpu_gem_create(struct drm_file *file,
+ struct drm_device *dev,
+ uint64_t size,
+ struct drm_gem_object **obj_p,
+ uint32_t *handle_p);
struct virtgpu_object *virtgpu_alloc_object(struct drm_device *dev,
size_t size, bool kernel, bool pinned);
int virtgpu_mode_dumb_create(struct drm_file *file_priv,
@@ -194,6 +199,7 @@ int virtgpu_surface_dirty(struct virtgpu_framebuffer *qfb,
unsigned num_clips);
/* virtio vg */
int virtgpu_resource_id_get(struct virtgpu_device *vgdev, uint32_t *resid);
+void virtgpu_resource_id_put(struct virtgpu_device *vgdev, uint32_t id);
int virtgpu_cmd_create_resource(struct virtgpu_device *vgdev,
uint32_t resource_id,
uint32_t format,
@@ -226,6 +232,23 @@ int virtgpu_fill_event_vq(struct virtgpu_device *vgdev, int entries);
int virtgpu_cmd_context_create(struct virtgpu_device *vgdev, uint32_t id,
uint32_t nlen, const char *name);
int virtgpu_cmd_context_destroy(struct virtgpu_device *vgdev, uint32_t id);
+int virtgpu_cmd_context_attach_resource(struct virtgpu_device *vgdev, uint32_t ctx_id,
+ uint32_t resource_id);
+int virtgpu_cmd_context_detach_resource(struct virtgpu_device *vgdev, uint32_t ctx_id,
+ uint32_t resource_id);
+int virtgpu_cmd_submit(struct virtgpu_device *vgdev, uint64_t offset,
+ uint32_t size, uint32_t ctx_id,
+ struct virtgpu_fence **fence);
+int virtgpu_cmd_transfer_from_host_3d(struct virtgpu_device *vgdev, uint32_t resource_id,
+ uint32_t ctx_id, uint64_t offset, uint32_t level, struct virtgpu_box *box,
+ struct virtgpu_fence **fence);
+int virtgpu_cmd_transfer_to_host_3d(struct virtgpu_device *vgdev, uint32_t resource_id,
+ uint32_t ctx_id,
+ uint64_t offset, uint32_t level, struct virtgpu_box *box,
+ struct virtgpu_fence **fence);
+int virtgpu_cmd_resource_create_3d(struct virtgpu_device *vgdev,
+ struct virtgpu_resource_create_3d *rc_3d,
+ struct virtgpu_fence **fence);
/* virtgpu_display.c */
int virtgpu_framebuffer_init(struct drm_device *dev,
struct virtgpu_framebuffer *vgfb,
@@ -258,6 +281,7 @@ int virtgpu_object_kmap(struct virtgpu_object *bo, void **ptr);
int virtgpu_object_get_sg_table(struct virtgpu_device *qdev,
struct virtgpu_object *bo);
void virtgpu_object_free_sg_table(struct virtgpu_object *bo);
+int virtgpu_object_wait(struct virtgpu_object *bo, bool no_wait);
static inline struct virtgpu_object *virtgpu_object_ref(struct virtgpu_object *bo)
{
ttm_bo_reference(&bo->tbo);
@@ -281,4 +305,24 @@ static inline u64 virtgpu_object_mmap_offset(struct virtgpu_object *bo)
return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}
+static inline int virtgpu_object_reserve(struct virtgpu_object *bo, bool no_wait)
+{
+ int r;
+
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ if (unlikely(r != 0)) {
+ if (r != -ERESTARTSYS) {
+ struct virtgpu_device *qdev = (struct virtgpu_device *)bo->gem_base.dev->dev_private;
+ dev_err(qdev->dev, "%p reserve failed\n", bo);
+ }
+ return r;
+ }
+ return 0;
+}
+
+static inline void virtgpu_object_unreserve(struct virtgpu_object *bo)
+{
+ ttm_bo_unreserve(&bo->tbo);
+}
+
#endif
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 671ae340342e..06fc4e6c01d4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -164,7 +164,7 @@ int virtgpu_fence_emit(struct virtgpu_device *vgdev,
(*fence)->seq = ++vgdev->fence_drv.sync_seq;
cmd->flags |= VIRTGPU_COMMAND_EMIT_FENCE;
- // cmd->fence_id = (*fence)->seq;
+ cmd->fence_id = (*fence)->seq;
return 0;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 1d44bba72898..27b5780c681b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -30,12 +30,11 @@ struct virtgpu_object *virtgpu_alloc_object(struct drm_device *dev,
return obj;
}
-int
-virtgpu_gem_create(struct drm_file *file,
- struct drm_device *dev,
- uint64_t size,
- struct drm_gem_object **obj_p,
- uint32_t *handle_p)
+int virtgpu_gem_create(struct drm_file *file,
+ struct drm_device *dev,
+ uint64_t size,
+ struct drm_gem_object **obj_p,
+ uint32_t *handle_p)
{
struct virtgpu_object *obj;
int ret;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index e93e30180a42..e4329c8fb415 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -24,9 +24,9 @@
*/
#include <drm/drmP.h>
#include "virtgpu_drv.h"
+#include <drm/virtgpu_drm.h>
#include "ttm/ttm_execbuf_util.h"
-#if 0
static void convert_to_hw_box(struct virtgpu_box *dst,
const struct drm_virtgpu_3d_box *src)
@@ -38,20 +38,18 @@ static void convert_to_hw_box(struct virtgpu_box *dst,
static int virtgpu_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct virtgpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_alloc *virtgpu_alloc = data;
int ret;
- struct virtgpu_bo *qobj;
+ struct drm_gem_object *obj;
uint32_t handle;
if (virtgpu_alloc->size == 0) {
DRM_ERROR("invalid size %d\n", virtgpu_alloc->size);
return -EINVAL;
}
- ret = virtgpu_gem_object_create_with_handle(vgdev, file_priv,
- 0,
- virtgpu_alloc->size,
- &qobj, &handle);
+ ret = virtgpu_gem_create(file_priv, dev,
+ virtgpu_alloc->size,
+ &obj, &handle);
if (ret) {
DRM_ERROR("%s: failed to create gem ret=%d\n",
__func__, ret);
@@ -71,7 +69,6 @@ int virtgpu_map_ioctl(struct drm_device *dev, void *data,
&virtgpu_map->offset);
}
-
/*
* Usage of execbuffer:
* Relocations need to take into account the full VIRTGPUDrawable size.
@@ -82,7 +79,7 @@ int virtgpu_execbuffer_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_virtgpu_execbuffer *execbuffer = data;
- return virtgpu_execbuffer(dev, execbuffer, file_priv);
+ return 0;//virtgpu_execbuffer(dev, execbuffer, file_priv);
}
@@ -101,10 +98,10 @@ static int virtgpu_resource_create_ioctl(struct drm_device *dev, void *data,
struct virtgpu_vbuffer *vbuf;
int ret;
uint32_t res_id;
- struct virtgpu_bo *qobj;
+ struct virtgpu_object *qobj;
+ struct drm_gem_object *obj;
uint32_t handle = 0;
uint32_t size, pg_size;
- struct virtgpu_bo *pg_bo = NULL;
void *optr;
int si;
struct scatterlist *sg;
@@ -112,6 +109,7 @@ static int virtgpu_resource_create_ioctl(struct drm_device *dev, void *data,
struct ttm_validate_buffer mainbuf, page_info_buf;
struct virtgpu_fence *fence;
struct ww_acquire_ctx ticket;
+ struct virtgpu_resource_create_3d rc_3d;
INIT_LIST_HEAD(&validate_list);
memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
@@ -127,93 +125,58 @@ static int virtgpu_resource_create_ioctl(struct drm_device *dev, void *data,
if (size == 0)
size = PAGE_SIZE;
- ret = virtgpu_gem_object_create_with_handle(vgdev, file_priv,
- 0,
- size,
- &qobj, &handle);
+ ret = virtgpu_gem_create(file_priv, dev,
+ size,
+ &obj, &handle);
if (ret)
goto fail_id;
+ qobj = gem_to_virtgpu_obj(obj);
/* use a gem reference since unref list undoes them */
drm_gem_object_reference(&qobj->gem_base);
mainbuf.bo = &qobj->tbo;
list_add(&mainbuf.head, &validate_list);
- if (virtgpu_create_sg == 1) {
- ret = virtgpu_bo_get_sg_table(vgdev, qobj);
- if (ret)
- goto fail_obj;
-
- pg_size = sizeof(struct virtgpu_iov_entry) * qobj->sgt->nents;
-
- ret = virtgpu_bo_create(vgdev, pg_size, true, 0, &pg_bo);
- if (ret)
- goto fail_unref;
-
- drm_gem_object_reference(&pg_bo->gem_base);
- page_info_buf.bo = &pg_bo->tbo;
- list_add(&page_info_buf.head, &validate_list);
- }
+ ret = virtgpu_object_get_sg_table(vgdev, qobj);
+ if (ret)
+ goto fail_obj;
+#if 0
ret = virtgpu_bo_list_validate(&ticket, &validate_list);
if (ret) {
printk("failed to validate\n");
goto fail_unref;
}
-
- if (virtgpu_create_sg == 1) {
- ret = virtgpu_bo_kmap(pg_bo, &optr);
- for_each_sg(qobj->sgt->sgl, sg, qobj->sgt->nents, si) {
- struct virtgpu_iov_entry *iov = ((struct virtgpu_iov_entry *)optr) + si;
- iov->addr = sg_phys(sg);
- iov->length = sg->length;
- iov->pad = 0;
- }
- virtgpu_bo_kunmap(pg_bo);
-
- qobj->is_res_bound = true;
- }
-
- cmd_p = virtgpu_alloc_cmd(vgdev, pg_bo, false, NULL, 0, &vbuf);
- memset(cmd_p, 0, sizeof(*cmd_p));
- cmd_p->type = VIRTGPU_CMD_CREATE_RESOURCE;
- cmd_p->u.res_create.handle = res_id;
- cmd_p->u.res_create.target = rc->target;
- cmd_p->u.res_create.format = rc->format;
- cmd_p->u.res_create.bind = rc->bind;
- cmd_p->u.res_create.width = rc->width;
- cmd_p->u.res_create.height = rc->height;
- cmd_p->u.res_create.depth = rc->depth;
- cmd_p->u.res_create.array_size = rc->array_size;
- cmd_p->u.res_create.last_level = rc->last_level;
- cmd_p->u.res_create.nr_samples = rc->nr_samples;
- cmd_p->u.res_create.nr_sg_entries = qobj->sgt ? qobj->sgt->nents : 0;
- cmd_p->u.res_create.flags = rc->flags;
-
- ret = virtgpu_fence_emit(vgdev, cmd_p, &fence);
-
- virtgpu_queue_cmd_buf(vgdev, vbuf);
+#endif
+ rc_3d.resource_id = res_id;
+ rc_3d.target = rc->target;
+ rc_3d.format = rc->format;
+ rc_3d.bind = rc->bind;
+ rc_3d.width = rc->width;
+ rc_3d.height = rc->height;
+ rc_3d.depth = rc->depth;
+ rc_3d.array_size = rc->array_size;
+ rc_3d.last_level = rc->last_level;
+ rc_3d.nr_samples = rc->nr_samples;
+ rc_3d.flags = 0;
+
+ ret = virtgpu_cmd_resource_create_3d(vgdev, &rc_3d, &fence);
ttm_eu_fence_buffer_objects(&ticket, &validate_list, fence);
- qobj->res_handle = res_id;
- qobj->stride = rc->stride;
+ qobj->hw_res_handle = res_id;
+// qobj->stride = rc->stride;
rc->res_handle = res_id; /* similar to a VM address */
rc->bo_handle = handle;
- virtgpu_unref_list(&validate_list);
- if (virtgpu_create_sg == 1)
- virtgpu_bo_unref(&pg_bo);
+// virtgpu_unref_list(&validate_list);
+
return 0;
fail_unref:
- virtgpu_unref_list(&validate_list);
-fail_pg_obj:
- if (virtgpu_create_sg == 1)
- if (pg_bo)
- virtgpu_bo_unref(&pg_bo);
+// virtgpu_unref_list(&validate_list);
fail_obj:
- drm_gem_object_handle_unreference_unlocked(&qobj->gem_base);
+// drm_gem_object_handle_unreference_unlocked(obj);
fail_id:
virtgpu_resource_id_put(vgdev, res_id);
return ret;
@@ -225,143 +188,101 @@ static int virtgpu_resource_info_ioctl(struct drm_device *dev, void *data,
struct virtgpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_resource_info *ri = data;
struct drm_gem_object *gobj = NULL;
- struct virtgpu_bo *qobj = NULL;
+ struct virtgpu_object *qobj = NULL;
gobj = drm_gem_object_lookup(dev, file_priv, ri->bo_handle);
if (gobj == NULL)
return -ENOENT;
- qobj = gem_to_virtgpu_bo(gobj);
+ qobj = gem_to_virtgpu_obj(gobj);
ri->size = qobj->gem_base.size;
- ri->res_handle = qobj->res_handle;
- ri->stride = qobj->stride;
+ ri->res_handle = qobj->hw_res_handle;
+// ri->stride = qobj->stride;
drm_gem_object_unreference_unlocked(gobj);
return 0;
}
-int virtgpu_resource_unref(struct virtgpu_device *vgdev, uint32_t res_handle)
-{
- struct virtgpu_command *cmd_p;
- struct virtgpu_vbuffer *vbuf;
-
- cmd_p = virtgpu_alloc_cmd(vgdev, NULL, false, NULL, 0, &vbuf);
- memset(cmd_p, 0, sizeof(*cmd_p));
- cmd_p->type = VIRTGPU_RESOURCE_UNREF;
- cmd_p->u.res_unref.res_handle = res_handle;
-
- virtgpu_queue_cmd_buf(vgdev, vbuf);
-
- virtgpu_resource_id_put(vgdev, res_handle);
- return 0;
-}
-
-static int virtgpu_transfer_get_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+static int virtgpu_transfer_from_host_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
{
struct virtgpu_device *vgdev = dev->dev_private;
struct virtgpu_fpriv *vfpriv = file->driver_priv;
- struct drm_virtgpu_3d_transfer_get *args = data;
- struct virtgpu_command *cmd_p;
- struct virtgpu_vbuffer *vbuf;
+ struct drm_virtgpu_3d_transfer_from_host *args = data;
struct drm_gem_object *gobj = NULL;
- struct virtgpu_bo *qobj = NULL;
+ struct virtgpu_object *qobj = NULL;
struct virtgpu_fence *fence;
int ret;
u32 offset = args->offset;
+ struct virtgpu_box box;
gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
if (gobj == NULL)
return -ENOENT;
- qobj = gem_to_virtgpu_bo(gobj);
+ qobj = gem_to_virtgpu_obj(gobj);
- ret = virtgpu_bo_reserve(qobj, false);
+ ret = virtgpu_object_reserve(qobj, false);
if (ret)
goto out;
- virtgpu_ttm_placement_from_domain(qobj, qobj->type);
ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
true, false);
if (unlikely(ret))
goto out_unres;
- cmd_p = virtgpu_alloc_cmd(vgdev, qobj, true, &offset, 0, &vbuf);
-
- cmd_p->type = VIRTGPU_TRANSFER_GET;
-
- cmd_p->u.transfer_get.res_handle = qobj->res_handle;
- convert_to_hw_box(&cmd_p->u.transfer_get.box, &args->box);
- cmd_p->u.transfer_get.level = args->level;
- cmd_p->u.transfer_get.data = offset;
- cmd_p->u.transfer_get.ctx_id = vfpriv->ctx_id;
- cmd_p->u.transfer_get.stride = args->stride;
- cmd_p->u.transfer_get.layer_stride = args->layer_stride;
- ret = virtgpu_fence_emit(vgdev, cmd_p, &fence);
-
- virtgpu_queue_cmd_buf(vgdev, vbuf);
-
- qobj->tbo.sync_obj = vgdev->mman.bdev.driver->sync_obj_ref(fence);
+ convert_to_hw_box(&box, &args->box);
+ ret = virtgpu_cmd_transfer_from_host_3d(vgdev, qobj->hw_res_handle,
+ vfpriv->ctx_id, offset,
+ args->level, &box, &fence);
+ if (!ret)
+ qobj->tbo.sync_obj = vgdev->mman.bdev.driver->sync_obj_ref(fence);
+
out_unres:
- virtgpu_bo_unreserve(qobj);
+ virtgpu_object_unreserve(qobj);
out:
drm_gem_object_unreference_unlocked(gobj);
- return 0;
+ return ret;
}
-static int virtgpu_transfer_put_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+static int virtgpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
{
struct virtgpu_device *vgdev = dev->dev_private;
struct virtgpu_fpriv *vfpriv = file->driver_priv;
- struct drm_virtgpu_3d_transfer_put *args = data;
- struct virtgpu_command *cmd_p;
- struct virtgpu_vbuffer *vbuf;
+ struct drm_virtgpu_3d_transfer_to_host *args = data;
struct drm_gem_object *gobj = NULL;
- struct virtgpu_bo *qobj = NULL;
+ struct virtgpu_object *qobj = NULL;
struct virtgpu_fence *fence;
+ struct virtgpu_box box;
int ret;
u32 offset = args->offset;
- u32 max_size = 0;
gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
if (gobj == NULL)
return -ENOENT;
- qobj = gem_to_virtgpu_bo(gobj);
+ qobj = gem_to_virtgpu_obj(gobj);
- ret = virtgpu_bo_reserve(qobj, false);
+ ret = virtgpu_object_reserve(qobj, false);
if (ret)
goto out;
- virtgpu_ttm_placement_from_domain(qobj, qobj->type);
ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
true, false);
if (unlikely(ret))
goto out_unres;
- if (args->box.h == 1 && args->box.d == 1 &&
- args->box.y == 0 && args->box.z == 0) {
- max_size = args->box.w;
- }
- cmd_p = virtgpu_alloc_cmd(vgdev, qobj, false, &offset, max_size, &vbuf);
- memset(cmd_p, 0, sizeof(*cmd_p));
- cmd_p->type = VIRTGPU_TRANSFER_PUT;
- cmd_p->u.transfer_put.res_handle = qobj->res_handle;
- convert_to_hw_box(&cmd_p->u.transfer_put.box, &args->box);
- cmd_p->u.transfer_put.level = args->level;
- cmd_p->u.transfer_put.stride = args->stride;
- cmd_p->u.transfer_put.layer_stride = args->layer_stride;
- cmd_p->u.transfer_put.data = offset;
- cmd_p->u.transfer_put.ctx_id = vfpriv->ctx_id;
- ret = virtgpu_fence_emit(vgdev, cmd_p, &fence);
- virtgpu_queue_cmd_buf(vgdev, vbuf);
-
- qobj->tbo.sync_obj = vgdev->mman.bdev.driver->sync_obj_ref(fence);
+ convert_to_hw_box(&box, &args->box);
+ ret = virtgpu_cmd_transfer_to_host_3d(vgdev, qobj->hw_res_handle,
+ vfpriv->ctx_id, offset,
+ args->level, &box, &fence);
+ if (!ret)
+ qobj->tbo.sync_obj = vgdev->mman.bdev.driver->sync_obj_ref(fence);
out_unres:
- virtgpu_bo_unreserve(qobj);
+ virtgpu_object_unreserve(qobj);
out:
drm_gem_object_unreference_unlocked(gobj);
return 0;
@@ -372,7 +293,7 @@ static int virtgpu_wait_ioctl(struct drm_device *dev, void *data,
{
struct drm_virtgpu_3d_wait *args = data;
struct drm_gem_object *gobj = NULL;
- struct virtgpu_bo *qobj = NULL;
+ struct virtgpu_object *qobj = NULL;
int ret;
bool nowait = false;
@@ -380,16 +301,17 @@ static int virtgpu_wait_ioctl(struct drm_device *dev, void *data,
if (gobj == NULL)
return -ENOENT;
- qobj = gem_to_virtgpu_bo(gobj);
+ qobj = gem_to_virtgpu_obj(gobj);
if (args->flags & VIRTGPU_WAIT_NOWAIT)
nowait = true;
- ret = virtgpu_wait(qobj, nowait);
+ ret = virtgpu_object_wait(qobj, nowait);
drm_gem_object_unreference_unlocked(gobj);
return ret;
}
+#if 0
static int virtgpu_get_caps_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
@@ -406,6 +328,7 @@ static int virtgpu_get_caps_ioctl(struct drm_device *dev,
args->handle = handle;
return 0;
}
+#endif
struct drm_ioctl_desc virtgpu_ioctls[] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_ALLOC, virtgpu_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),
@@ -422,14 +345,15 @@ struct drm_ioctl_desc virtgpu_ioctls[] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtgpu_resource_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
/* make transfer async to the main ring? - not sure, can we
thread these in the underlying GL */
- DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_GET, virtgpu_transfer_get_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_PUT, virtgpu_transfer_put_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST, virtgpu_transfer_from_host_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST, virtgpu_transfer_to_host_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtgpu_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtgpu_get_caps_ioctl, DRM_AUTH|DRM_UNLOCKED),
+// DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtgpu_get_caps_ioctl, DRM_AUTH|DRM_UNLOCKED),
};
int virtgpu_max_ioctls = DRM_ARRAY_SIZE(virtgpu_ioctls);
-#endif
+
+
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index b25fe6cd1597..231afc2efc1f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -3,8 +3,6 @@
#include <drm/drmP.h>
#include "virtgpu_drv.h"
-int virtgpu_max_ioctls;
-
static int virtgpu_ctx_id_get(struct virtgpu_device *vgdev, uint32_t *resid)
{
int handle;
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 009aae27c969..40fc40267c99 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -168,3 +168,19 @@ void virtgpu_object_free_sg_table(struct virtgpu_object *bo)
kfree(bo->pages);
bo->pages = NULL;
}
+
+int virtgpu_object_wait(struct virtgpu_object *bo, bool no_wait)
+{
+ int r;
+
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ if (unlikely(r != 0))
+ return r;
+ spin_lock(&bo->tbo.bdev->fence_lock);
+ if (bo->tbo.sync_obj)
+ r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+ spin_unlock(&bo->tbo.bdev->fence_lock);
+ ttm_bo_unreserve(&bo->tbo);
+ return r;
+}
+
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 4a93711b81ba..1ba292d8c0f7 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -326,7 +326,6 @@ int virtgpu_cmd_unref_resource(struct virtgpu_device *vgdev,
cmd_p->u.resource_unref.resource_id = resource_id;
virtgpu_queue_ctrl_buffer(vgdev, vbuf);
-
return 0;
}
@@ -492,6 +491,122 @@ int virtgpu_cmd_context_destroy(struct virtgpu_device *vgdev, uint32_t id)
return 0;
}
+int virtgpu_cmd_context_attach_resource(struct virtgpu_device *vgdev, uint32_t ctx_id,
+ uint32_t resource_id)
+{
+ struct virtgpu_command *cmd_p;
+ struct virtgpu_vbuffer *vbuf;
+
+ cmd_p = virtgpu_alloc_cmd(vgdev, &vbuf);
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->type = VIRTGPU_CMD_CTX_ATTACH_RESOURCE;
+ cmd_p->u.ctx_resource.ctx_id = ctx_id;
+ cmd_p->u.ctx_resource.resource_id = resource_id;
+ virtgpu_queue_ctrl_buffer(vgdev, vbuf);
+ return 0;
+
+}
+
+int virtgpu_cmd_context_detach_resource(struct virtgpu_device *vgdev, uint32_t ctx_id,
+ uint32_t resource_id)
+{
+ struct virtgpu_command *cmd_p;
+ struct virtgpu_vbuffer *vbuf;
+
+ cmd_p = virtgpu_alloc_cmd(vgdev, &vbuf);
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->type = VIRTGPU_CMD_CTX_DETACH_RESOURCE;
+ cmd_p->u.ctx_resource.ctx_id = ctx_id;
+ cmd_p->u.ctx_resource.resource_id = resource_id;
+ virtgpu_queue_ctrl_buffer(vgdev, vbuf);
+ return 0;
+}
+
+int virtgpu_cmd_resource_create_3d(struct virtgpu_device *vgdev,
+ struct virtgpu_resource_create_3d *rc_3d,
+ struct virtgpu_fence **fence)
+{
+ struct virtgpu_command *cmd_p;
+ struct virtgpu_vbuffer *vbuf;
+
+ cmd_p = virtgpu_alloc_cmd(vgdev, &vbuf);
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->type = VIRTGPU_CMD_RESOURCE_CREATE_3D;
+ cmd_p->u.resource_create_3d = *rc_3d;
+ if (fence)
+ virtgpu_fence_emit(vgdev, cmd_p, fence);
+ virtgpu_queue_ctrl_buffer(vgdev, vbuf);
+ return 0;
+}
+
+int virtgpu_cmd_transfer_to_host_3d(struct virtgpu_device *vgdev, uint32_t resource_id,
+ uint32_t ctx_id,
+ uint64_t offset, uint32_t level, struct virtgpu_box *box,
+ struct virtgpu_fence **fence)
+{
+ struct virtgpu_command *cmd_p;
+ struct virtgpu_vbuffer *vbuf;
+
+ cmd_p = virtgpu_alloc_cmd(vgdev, &vbuf);
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->type = VIRTGPU_CMD_TRANSFER_TO_HOST_3D;
+ cmd_p->u.transfer_to_host_3d.ctx_id = ctx_id;
+ cmd_p->u.transfer_to_host_3d.resource_id = resource_id;
+ cmd_p->u.transfer_to_host_3d.box = *box;
+ cmd_p->u.transfer_to_host_3d.data = offset;
+ cmd_p->u.transfer_to_host_3d.level = level;
+ if (fence)
+ virtgpu_fence_emit(vgdev, cmd_p, fence);
+ virtgpu_queue_ctrl_buffer(vgdev, vbuf);
+ return 0;
+}
+
+int virtgpu_cmd_transfer_from_host_3d(struct virtgpu_device *vgdev, uint32_t resource_id,
+ uint32_t ctx_id, uint64_t offset, uint32_t level, struct virtgpu_box *box,
+ struct virtgpu_fence **fence)
+{
+ struct virtgpu_command *cmd_p;
+ struct virtgpu_vbuffer *vbuf;
+
+ cmd_p = virtgpu_alloc_cmd(vgdev, &vbuf);
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->type = VIRTGPU_CMD_TRANSFER_FROM_HOST_3D;
+ cmd_p->u.transfer_from_host_3d.ctx_id = ctx_id;
+ cmd_p->u.transfer_from_host_3d.resource_id = resource_id;
+ cmd_p->u.transfer_from_host_3d.box = *box;
+ cmd_p->u.transfer_from_host_3d.data = offset;
+ cmd_p->u.transfer_from_host_3d.level = level;
+ if (fence)
+ virtgpu_fence_emit(vgdev, cmd_p, fence);
+ virtgpu_queue_ctrl_buffer(vgdev, vbuf);
+ return 0;
+}
+
+int virtgpu_cmd_submit(struct virtgpu_device *vgdev, uint64_t offset,
+ uint32_t size, uint32_t ctx_id,
+ struct virtgpu_fence **fence)
+{
+ struct virtgpu_command *cmd_p;
+ struct virtgpu_vbuffer *vbuf;
+
+ cmd_p = virtgpu_alloc_cmd(vgdev, &vbuf);
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->type = VIRTGPU_CMD_SUBMIT_3D;
+ cmd_p->u.cmd_submit.phy_addr = offset;
+ cmd_p->u.cmd_submit.size = size;
+ cmd_p->u.cmd_submit.ctx_id = ctx_id;
+ if (fence)
+ virtgpu_fence_emit(vgdev, cmd_p, fence);
+ virtgpu_queue_ctrl_buffer(vgdev, vbuf);
+ return 0;
+}
+
int virtgpu_object_attach(struct virtgpu_device *vgdev, struct virtgpu_object *obj, uint32_t resource_id)
{
uint32_t sz;
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index 0ec78f90dc30..f4e4cccf8e7b 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -40,8 +40,8 @@
#define DRM_VIRTGPU_GETPARAM 0x03
#define DRM_VIRTGPU_RESOURCE_CREATE 0x04
#define DRM_VIRTGPU_RESOURCE_INFO 0x05
-#define DRM_VIRTGPU_TRANSFER_GET 0x06
-#define DRM_VIRTGPU_TRANSFER_PUT 0x07
+#define DRM_VIRTGPU_TRANSFER_FROM_HOST 0x06
+#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
@@ -102,22 +102,22 @@ struct drm_virtgpu_3d_box {
uint32_t w, h, d;
};
-struct drm_virtgpu_3d_transfer_put {
+struct drm_virtgpu_3d_transfer_to_host {
uint32_t bo_handle;
struct drm_virtgpu_3d_box box;
uint32_t level;
uint32_t offset;
- uint32_t stride;
- uint32_t layer_stride;
+ // uint32_t stride;
+ // uint32_t layer_stride;
};
-struct drm_virtgpu_3d_transfer_get {
+struct drm_virtgpu_3d_transfer_from_host {
uint32_t bo_handle;
struct drm_virtgpu_3d_box box;
uint32_t level;
uint32_t offset;
- uint32_t stride;
- uint32_t layer_stride;
+ // uint32_t stride;
+ // uint32_t layer_stride;
};
#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
@@ -152,13 +152,13 @@ struct drm_virtgpu_get_caps {
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_INFO, \
struct drm_virtgpu_resource_info)
-#define DRM_IOCTL_VIRTGPU_TRANSFER_GET \
- DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_GET, \
- struct drm_virtgpu_3d_transfer_get)
+#define DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_FROM_HOST, \
+ struct drm_virtgpu_3d_transfer_from_host)
-#define DRM_IOCTL_VIRTGPU_TRANSFER_PUT \
- DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_PUT, \
- struct drm_virtgpu_3d_transfer_put)
+#define DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_TO_HOST, \
+ struct drm_virtgpu_3d_transfer_to_host)
#define DRM_IOCTL_VIRTGPU_WAIT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_WAIT, \
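
For reference, a minimal userspace sketch of driving the renamed transfer path added above. It assumes the modified virtgpu_drm.h is exported to userspace, a DRM fd is already open, and bo_handle came back from DRM_IOCTL_VIRTGPU_RESOURCE_CREATE; the box.x field name and the include paths are assumptions, everything else mirrors the structures in this patch.

/* Hypothetical userspace sketch -- illustrative only, not part of the patch. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>          /* DRM_IOWR / DRM_COMMAND_BASE; install path may differ */
#include "virtgpu_drm.h"      /* the uapi header modified above, assumed exported */

/* Push the first width x height texels of mip level 0 of a resource to the host. */
static int virtgpu_upload_level0(int drm_fd, uint32_t bo_handle,
                                 uint32_t width, uint32_t height)
{
        struct drm_virtgpu_3d_transfer_to_host xfer;

        memset(&xfer, 0, sizeof(xfer));
        xfer.bo_handle = bo_handle;   /* GEM handle returned by VIRTGPU_RESOURCE_CREATE */
        xfer.level = 0;
        xfer.offset = 0;              /* byte offset into the guest backing object */
        xfer.box.x = 0;               /* x assumed; y/z/w/h/d are used in the diff above */
        xfer.box.y = 0;
        xfer.box.z = 0;
        xfer.box.w = width;
        xfer.box.h = height;
        xfer.box.d = 1;

        /* The driver turns this into VIRTGPU_CMD_TRANSFER_TO_HOST_3D and fences it. */
        return ioctl(drm_fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
}

Completion of the transfer can then be observed with DRM_IOCTL_VIRTGPU_WAIT on the same bo_handle, or polled with the VIRTGPU_WAIT_NOWAIT flag.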