author    Dave Airlie <airlied@gmail.com>    2013-09-09 14:08:16 +1000
committer Dave Airlie <airlied@gmail.com>    2013-09-09 14:08:16 +1000
commit    bd799e04e015b23587e23f5d5f6b9bec2d3e63ce (patch)
tree      7bd759356b69a8d62cdce40d3958a917d236f7d0
parent    f92ebf1f3ba73e2c06b3acca0dcc55229054c89d (diff)
more bits
-rw-r--r--  drivers/gpu/drm/virtio/Makefile           2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fence.c  151
-rw-r--r--  drivers/gpu/drm/virtio/virtio_drv.h      42
-rw-r--r--  drivers/gpu/drm/virtio/virtio_hw.h      101
-rw-r--r--  drivers/gpu/drm/virtio/virtio_kms.c       7
-rw-r--r--  drivers/gpu/drm/virtio/virtio_vq.c      103
6 files changed, 400 insertions, 6 deletions
diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile
index 3d62b402f6d2..f0e90ebb2068 100644
--- a/drivers/gpu/drm/virtio/Makefile
+++ b/drivers/gpu/drm/virtio/Makefile
@@ -4,6 +4,6 @@
ccflags-y := -Iinclude/drm
-virtio-gpu-y := virtio_drv.o virtio_kms.o virtio_drm_bus.o virtio_gem.o virtio_ttm.o virtio_object.o virtio_fb.o virtio_display.o
+virtio-gpu-y := virtio_drv.o virtio_kms.o virtio_drm_bus.o virtio_gem.o virtio_ttm.o virtio_object.o virtio_fb.o virtio_display.o virtio_vq.o
obj-$(CONFIG_DRM_VIRTIO_GPU)+= virtio-gpu.o
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
new file mode 100644
index 000000000000..5d8fc074b540
--- /dev/null
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -0,0 +1,151 @@
+#include <drm/drmP.h>
+#include "virtio_drv.h"
+
+static void virtgpu_fence_destroy(struct kref *kref)
+{
+ struct virtgpu_fence *fence;
+
+ fence = container_of(kref, struct virtgpu_fence, kref);
+ kfree(fence);
+}
+
+struct virtgpu_fence *virtgpu_fence_ref(struct virtgpu_fence *fence)
+{
+ kref_get(&fence->kref);
+ return fence;
+}
+
+void virtgpu_fence_unref(struct virtgpu_fence **fence)
+{
+ struct virtgpu_fence *tmp = *fence;
+
+ *fence = NULL;
+ if (tmp) {
+ kref_put(&tmp->kref, virtgpu_fence_destroy);
+ }
+}
+
+static bool virtgpu_fence_seq_signaled(struct virtgpu_device *qdev, u64 seq, bool process)
+{
+ if (atomic64_read(&qdev->fence_drv.last_seq) >= seq)
+ return true;
+
+ if (process)
+ virtgpu_fence_process(qdev);
+
+ if (atomic64_read(&qdev->fence_drv.last_seq) >= seq)
+ return true;
+ return false;
+}
+
+static int virtgpu_fence_wait_seq(struct virtgpu_device *qdev, u64 target_seq,
+ bool intr)
+{
+ uint64_t timeout, last_activity;
+ uint64_t seq;
+ bool signaled;
+ int r;
+
+ while (target_seq > atomic64_read(&qdev->fence_drv.last_seq)) {
+
+ timeout = jiffies - VIRTGPU_FENCE_JIFFIES_TIMEOUT;
+ if (time_after(qdev->fence_drv.last_activity, timeout)) {
+ /* the normal case, timeout is somewhere before last_activity */
+ timeout = qdev->fence_drv.last_activity - timeout;
+ } else {
+ /* either jiffies wrapped around, or no fence was signaled in the last 500ms;
+ * either way, we will just wait for the minimum amount and then check for a lockup
+ */
+ timeout = 1;
+ }
+ seq = atomic64_read(&qdev->fence_drv.last_seq);
+ /* Save current last activity value, used to check for GPU lockups */
+ last_activity = qdev->fence_drv.last_activity;
+
+ // radeon_irq_kms_sw_irq_get(rdev, ring);
+ if (intr) {
+ r = wait_event_interruptible_timeout(qdev->fence_queue,
+ (signaled = virtgpu_fence_seq_signaled(qdev, target_seq, true)),
+ timeout);
+ } else {
+ r = wait_event_timeout(qdev->fence_queue,
+ (signaled = virtgpu_fence_seq_signaled(qdev, target_seq, true)),
+ timeout);
+ }
+ // radeon_irq_kms_sw_irq_put(rdev, ring);
+ if (unlikely(r < 0)) {
+ return r;
+ }
+
+ if (unlikely(!signaled)) {
+ /* we were interrupted for some reason and fence
+ * isn't signaled yet, resume waiting */
+ if (r) {
+ continue;
+ }
+
+ /* check if sequence value has changed since last_activity */
+ if (seq != atomic64_read(&qdev->fence_drv.last_seq)) {
+ continue;
+ }
+
+ /* test if somebody else has already decided that this is a lockup */
+ if (last_activity != qdev->fence_drv.last_activity) {
+ continue;
+ }
+
+ }
+ }
+ return 0;
+}
+
+bool virtgpu_fence_signaled(struct virtgpu_fence *fence, bool process)
+{
+ if (!fence)
+ return true;
+
+ if (fence->seq == VIRTGPU_FENCE_SIGNALED_SEQ)
+ return true;
+
+ if (virtgpu_fence_seq_signaled(fence->qdev, fence->seq, process)) {
+ fence->seq = VIRTGPU_FENCE_SIGNALED_SEQ;
+ return true;
+ }
+ return false;
+}
+
+int virtgpu_fence_wait(struct virtgpu_fence *fence, bool intr)
+{
+ int r;
+
+ if (fence == NULL)
+ return -EINVAL;
+
+ r = virtgpu_fence_wait_seq(fence->qdev, fence->seq,
+ intr);
+ if (r)
+ return r;
+
+ fence->seq = VIRTGPU_FENCE_SIGNALED_SEQ;
+
+ return 0;
+
+}
+
+int virtgpu_fence_emit(struct virtgpu_device *qdev,
+ struct virtgpu_command *cmd,
+ struct virtgpu_fence **fence)
+{
+ *fence = kmalloc(sizeof(struct virtgpu_fence), GFP_KERNEL);
+ if ((*fence) == NULL)
+ return -ENOMEM;
+
+ kref_init(&((*fence)->kref));
+ (*fence)->qdev = qdev;
+ (*fence)->seq = ++qdev->fence_drv.sync_seq;
+
+ cmd->flags |= VIRTGPU_COMMAND_EMIT_FENCE;
+ cmd->fence_id = (*fence)->seq;
+
+ return 0;
+}
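
Usage sketch for the fence helpers above (illustration only, not part of this patch; example_submit_and_wait is a made-up helper name and the actual command submission step is elided):

    static int example_submit_and_wait(struct virtgpu_device *qdev,
                                       struct virtgpu_command *cmd)
    {
        struct virtgpu_fence *fence = NULL;
        int ret;

        /* attach a new fence sequence number to the command */
        ret = virtgpu_fence_emit(qdev, cmd, &fence);
        if (ret)
            return ret;

        /* ... queue the command to the host here ... */

        /* block (interruptibly) until the host reports the fence as
         * signaled, then drop our reference */
        ret = virtgpu_fence_wait(fence, true);
        virtgpu_fence_unref(&fence);
        return ret;
    }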
diff --git a/drivers/gpu/drm/virtio/virtio_drv.h b/drivers/gpu/drm/virtio/virtio_drv.h
index 18c0ed10cf03..42e22fca8db0 100644
--- a/drivers/gpu/drm/virtio/virtio_drv.h
+++ b/drivers/gpu/drm/virtio/virtio_drv.h
@@ -37,7 +37,31 @@ struct virtgpu_bo {
uint32_t hw_res_handle;
};
#define gem_to_virtgpu_bo(gobj) container_of((gobj), struct virtgpu_bo, gem_base)
-
+
+struct virtgpu_fence_driver {
+ atomic64_t last_seq;
+ uint64_t last_activity;
+ bool initialized;
+ uint64_t sync_seq;
+
+ spinlock_t event_lock;
+ struct list_head event_list;
+ uint64_t first_seq_event_list;
+};
+
+struct virtgpu_fence {
+ struct virtgpu_device *qdev;
+ struct kref kref;
+ uint64_t seq;
+};
+
+struct virtgpu_vbuffer {
+ char *buf;
+ int size;
+
+ struct list_head destroy_list;
+};
+
struct virtgpu_crtc {
struct drm_crtc base;
int cur_x;
@@ -83,6 +107,17 @@ struct virtgpu_device {
struct virtgpu_fbdev *vgfbdev;
struct virtqueue *ctrlq;
+ spinlock_t ctrlq_lock;
+ wait_queue_head_t ctrl_ack_queue;
+ struct work_struct dequeue_work;
+
+ struct idr resource_idr;
+ spinlock_t resource_idr_lock;
+
+ struct virtgpu_fence_driver fence_drv;
+ wait_queue_head_t fence_queue;
+
+ int num_outputs;
};
int virtgpu_driver_load(struct drm_device *dev, unsigned long flags);
@@ -105,4 +140,9 @@ extern void virtgpu_bo_unref(struct virtgpu_bo **bo);
#define VIRTGPUFB_CONN_LIMIT 1
int virtgpu_fbdev_init(struct virtgpu_device *vgdev);
void virtgpu_fbdev_fini(struct virtgpu_device *vgdev);
+
+/* virtio vg */
+void virtgpu_ctrl_ack(struct virtqueue *vq);
+void virtgpu_dequeue_work_func(struct work_struct *work);
+
#endif
diff --git a/drivers/gpu/drm/virtio/virtio_hw.h b/drivers/gpu/drm/virtio/virtio_hw.h
new file mode 100644
index 000000000000..01bb200b4672
--- /dev/null
+++ b/drivers/gpu/drm/virtio/virtio_hw.h
@@ -0,0 +1,101 @@
+#ifndef VIRTGPU_HW_H
+#define VIRTGPU_HW_H
+
+enum virtgpu_ctrl_cmd {
+ VIRTGPU_CMD_NOP,
+ VIRTGPU_CMD_ATTACH_STATUS_PAGE,
+ VIRTGPU_CMD_GET_DISPLAY_INFO,
+ VIRTGPU_CMD_GET_CAPS,
+ VIRTGPU_CMD_RESOURCE_CREATE_2D,
+ VIRTGPU_CMD_RESOURCE_UNREF,
+ VIRTGPU_CMD_SET_SCANOUT,
+ VIRTGPU_CMD_RESOURCE_FLUSH,
+ VIRTGPU_CMD_TRANSFER_SEND_2D,
+ VIRTGPU_CMD_RESOURCE_ATTACH_BACKING,
+ VIRTGPU_CMD_RESOURCE_INVAL_BACKING,
+};
+
+struct virtgpu_hw_status_page {
+ uint64_t fence_id;
+ uint32_t cursor_x, cursor_y;
+ uint32_t cursor_hot_x, cursor_hot_y;
+ uint32_t cursor_id;
+ uint32_t error_state;
+};
+
+struct virtgpu_attach_status_page {
+ uint64_t page_address;
+};
+
+struct virtgpu_resource_unref {
+ uint32_t resource_id;
+};
+
+/* create a simple 2d resource with a format */
+struct virtgpu_resource_create_2d {
+ uint32_t resource_id;
+ uint32_t format;
+ uint32_t width;
+ uint32_t height;
+};
+
+struct virtgpu_set_scanout {
+ uint32_t scanout_id;
+ uint32_t resource_id;
+ uint32_t width;
+ uint32_t height;
+ uint32_t x;
+ uint32_t y;
+};
+
+/* simple transfer send */
+struct virtgpu_transfer_send_2d {
+ uint32_t resource_id;
+ uint32_t offset;
+ uint32_t width;
+ uint32_t height;
+ uint32_t x;
+ uint32_t y;
+};
+
+#define VIRTGPU_MAX_SCANOUTS 16
+struct virtgpu_display_info {
+ uint32_t num_scanouts;
+ struct {
+ uint32_t enabled;
+ uint32_t width;
+ uint32_t height;
+ uint32_t x;
+ uint32_t y;
+ uint32_t flags;
+ } pmodes[VIRTGPU_MAX_SCANOUTS];
+};
+
+#define VIRTGPU_COMMAND_EMIT_FENCE (1 << 0)
+
+struct virtgpu_command {
+ uint32_t type;
+ uint32_t flags;
+ uint64_t fence_id;
+ union virtgpu_cmds {
+ struct virtgpu_attach_status_page attach_status_page;
+ struct virtgpu_resource_create_2d resource_create_2d;
+ struct virtgpu_resource_unref resource_unref;
+ struct virtgpu_set_scanout set_scanout;
+ struct virtgpu_transfer_send_2d transfer_send_2d;
+ } u;
+};
+
+/* simple formats for fbcon/X use */
+enum virtgpu_formats {
+ VIRGL_FORMAT_B8G8R8A8_UNORM = 1,
+ VIRGL_FORMAT_B8G8R8X8_UNORM = 2,
+ VIRGL_FORMAT_A8R8G8B8_UNORM = 3,
+ VIRGL_FORMAT_X8R8G8B8_UNORM = 4,
+
+ VIRGL_FORMAT_B5G5R5A1_UNORM = 5,
+
+ VIRGL_FORMAT_R8_UNORM = 64,
+};
+
+#endif
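
Usage sketch for the command layout above (illustration only, not part of this patch; the resource id, format and size values are made up):

    struct virtgpu_command cmd;

    memset(&cmd, 0, sizeof(cmd));
    cmd.type = VIRTGPU_CMD_RESOURCE_CREATE_2D;
    cmd.u.resource_create_2d.resource_id = 1;    /* hypothetical id */
    cmd.u.resource_create_2d.format = VIRGL_FORMAT_B8G8R8X8_UNORM;
    cmd.u.resource_create_2d.width = 1024;
    cmd.u.resource_create_2d.height = 768;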
diff --git a/drivers/gpu/drm/virtio/virtio_kms.c b/drivers/gpu/drm/virtio/virtio_kms.c
index 59d98a6103ef..22075fe337e6 100644
--- a/drivers/gpu/drm/virtio/virtio_kms.c
+++ b/drivers/gpu/drm/virtio/virtio_kms.c
@@ -5,10 +5,6 @@
int virtgpu_max_ioctls;
-static void virtgpu_ctrl_ack(struct virtqueue *vq)
-{
-
-}
int virtgpu_driver_load(struct drm_device *dev, unsigned long flags)
{
@@ -28,6 +24,9 @@ int virtgpu_driver_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = vgdev;
vgdev->vdev = dev->virtdev;
+ init_waitqueue_head(&vgdev->ctrl_ack_queue);
+ INIT_WORK(&vgdev->dequeue_work, virtgpu_dequeue_work_func);
+
nvqs = 1;
ret = vgdev->vdev->config->find_vqs(vgdev->vdev, nvqs, vqs, callbacks, names);
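
The vqs/callbacks/names arrays passed to find_vqs() are not shown in this hunk; a plausible wiring for the single control queue (an assumption, not part of the patch) would be:

    static vq_callback_t *callbacks[] = { virtgpu_ctrl_ack };
    static const char *names[] = { "control" };
    struct virtqueue *vqs[1];

    /* after find_vqs() succeeds: */
    vgdev->ctrlq = vqs[0];
    spin_lock_init(&vgdev->ctrlq_lock);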
diff --git a/drivers/gpu/drm/virtio/virtio_vq.c b/drivers/gpu/drm/virtio/virtio_vq.c
new file mode 100644
index 000000000000..6ed4184c6349
--- /dev/null
+++ b/drivers/gpu/drm/virtio/virtio_vq.c
@@ -0,0 +1,103 @@
+#include <drm/drmP.h>
+#include "virtio_drv.h"
+#include <linux/virtio.h>
+#include <linux/virtio_ring.h>
+
+void virtgpu_ctrl_ack(struct virtqueue *vq)
+{
+ struct drm_device *dev = vq->vdev->priv;
+ struct virtgpu_device *vgdev = dev->dev_private;
+ schedule_work(&vgdev->dequeue_work);
+}
+
+struct virtgpu_vbuffer *virtgpu_allocate_vbuf(struct virtgpu_device *vgdev,
+ int size)
+{
+ struct virtgpu_vbuffer *vbuf;
+
+ vbuf = kmalloc(sizeof(*vbuf) + size, GFP_KERNEL);
+ if (!vbuf)
+ goto fail;
+
+ vbuf->buf = (void *)vbuf + sizeof(*vbuf);
+ vbuf->size = size;
+
+ return vbuf;
+fail:
+ kfree(vbuf);
+ return ERR_PTR(-ENOMEM);
+}
+
+static void free_vbuf(struct virtgpu_vbuffer *vbuf)
+{
+ kfree(vbuf);
+}
+
+static int reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
+{
+ struct virtgpu_vbuffer *vbuf;
+ unsigned int len;
+ int freed = 0;
+ while ((vbuf = virtqueue_get_buf(vq, &len))) {
+ list_add(&vbuf->destroy_list, reclaim_list);
+ freed++;
+ }
+ return freed;
+}
+
+void virtgpu_dequeue_work_func(struct work_struct *work)
+{
+ struct virtgpu_device *vgdev = container_of(work, struct virtgpu_device,
+ dequeue_work);
+ int ret;
+ struct list_head reclaim_list;
+ struct virtgpu_vbuffer *entry, *tmp;
+
+ INIT_LIST_HEAD(&reclaim_list);
+ spin_lock(&vgdev->ctrlq_lock);
+ do {
+ virtqueue_disable_cb(vgdev->ctrlq);
+ ret = reclaim_vbufs(vgdev->ctrlq, &reclaim_list);
+ if (ret == 0)
+ printk("cleaned 0 buffers wierd\n");
+
+ } while (!virtqueue_enable_cb(vgdev->ctrlq));
+ spin_unlock(&vgdev->ctrlq_lock);
+
+ list_for_each_entry_safe(entry, tmp, &reclaim_list, destroy_list) {
+ list_del(&entry->destroy_list);
+ free_vbuf(entry);
+ }
+ wake_up(&vgdev->ctrl_ack_queue);
+}
+
+int virtgpu_queue_ctrl_buffer(struct virtgpu_device *vgdev,
+ struct virtgpu_vbuffer *vbuf)
+{
+ struct virtqueue *vq = vgdev->ctrlq;
+ struct scatterlist *sgs[2], vcmd;
+ int outcnt, incnt = 0;
+ int ret;
+
+ sg_init_one(&vcmd, vbuf->buf, vbuf->size);
+ sgs[0] = &vcmd;
+ outcnt = 1;
+
+ spin_lock(&vgdev->ctrlq_lock);
+retry:
+ ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
+ if (ret == -ENOSPC) {
+ spin_unlock(&vgdev->ctrlq_lock);
+ wait_event(vgdev->ctrl_ack_queue, vq->num_free);
+ spin_lock(&vgdev->ctrlq_lock);
+ goto retry;
+ } else {
+ virtqueue_kick(vq);
+ }
+
+ spin_unlock(&vgdev->ctrlq_lock);
+
+ if (!ret)
+ ret = vq->num_free;
+ return ret;
+}
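
Putting the queue path together, a minimal sketch (illustration only, not part of this patch; example_send_cmd is a made-up helper name):

    static int example_send_cmd(struct virtgpu_device *vgdev,
                                struct virtgpu_command *cmd)
    {
        struct virtgpu_vbuffer *vbuf;
        int ret;

        /* allocate a vbuffer with room for the command payload */
        vbuf = virtgpu_allocate_vbuf(vgdev, sizeof(*cmd));
        if (IS_ERR(vbuf))
            return PTR_ERR(vbuf);

        memcpy(vbuf->buf, cmd, sizeof(*cmd));

        /* queue it on the ctrl virtqueue; the dequeue work frees the
         * vbuffer once the host has consumed it */
        ret = virtgpu_queue_ctrl_buffer(vgdev, vbuf);
        return ret < 0 ? ret : 0;
    }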