author	Dave Airlie <airlied@redhat.com>	2013-10-15 18:21:36 +0100
committer	Dave Airlie <airlied@redhat.com>	2013-10-15 18:21:36 +0100
commit	f7bad4e72b160dbcdb7d3ed8080cc33a79f394e2 (patch)
tree	96cea2c7141c942b308ebbbf6864a80a3e228ae5
parent	2ebe202350d12ee5ce0c9bacbffcc1d8dee2e909 (diff)
move some stuff around, port over more fbdev code
-rw-r--r--	drivers/gpu/drm/virtio/virtgpu_drv.h	16
-rw-r--r--	drivers/gpu/drm/virtio/virtgpu_fb.c	157
-rw-r--r--	drivers/gpu/drm/virtio/virtgpu_vq.c	181
-rw-r--r--	drivers/gpu/drm/virtio/virtio_hw.h	15
4 files changed, 358 insertions, 11 deletions
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 690afc030a7b..c42a3bff337c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -102,6 +102,8 @@ int virtgpu_gem_init_object(struct drm_gem_object *obj);
void virtgpu_gem_free_object(struct drm_gem_object *gem_obj);
int virtgpu_gem_init(struct virtgpu_device *qdev);
void virtgpu_gem_fini(struct virtgpu_device *qdev);
+struct virtgpu_object *virtgpu_alloc_object(struct drm_device *dev,
+ size_t size);
/* virtio_fb */
#define VIRTGPUFB_CONN_LIMIT 1
@@ -109,6 +111,20 @@ int virtgpu_fbdev_init(struct virtgpu_device *vgdev);
void virtgpu_fbdev_fini(struct virtgpu_device *vgdev);
/* virtio vg */
+int virtgpu_resource_id_get(struct virtgpu_device *vgdev, uint32_t *resid);
+int virtgpu_cmd_create_resource(struct virtgpu_device *vgdev,
+ uint32_t resource_id,
+ uint32_t format,
+ uint32_t width,
+ uint32_t height);
+int virtgpu_cmd_transfer_send_2d(struct virtgpu_device *vgdev,
+ uint32_t resource_id, uint32_t offset,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y);
+int virtgpu_cmd_resource_flush(struct virtgpu_device *vgdev,
+ uint32_t resource_id,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y);
void virtgpu_ctrl_ack(struct virtqueue *vq);
void virtgpu_dequeue_work_func(struct work_struct *work);
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index 9309e6994b92..b5222ffe7821 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -1,6 +1,9 @@
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include "virtgpu_drv.h"
+#include "virtio_hw.h"
+
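+/* flush dirty fbcon regions at most 60 times a second (HZ jiffies == 1s) */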
+#define VIRTGPU_FBCON_POLL_PERIOD (HZ / 60)
struct virtgpu_fbdev {
struct drm_fb_helper helper;
@@ -9,14 +12,127 @@ struct virtgpu_fbdev {
struct virtgpu_device *vgdev;
struct delayed_work work;
};
+#define DL_ALIGN_UP(x, a) ALIGN(x, a)
+#define DL_ALIGN_DOWN(x, a) ALIGN(x-(a-1), a)
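+/* e.g. for a == 8: DL_ALIGN_DOWN(13, 8) == 8, DL_ALIGN_UP(13, 8) == 16 */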
+
+static int virtgpu_dirty_update(struct virtgpu_framebuffer *fb, bool store,
+ int x, int y, int width, int height)
+{
+ struct drm_device *dev = fb->base.dev;
+ struct virtgpu_device *vgdev = dev->dev_private;
+ bool store_for_later = false;
+ int aligned_x;
+ int bpp = (fb->base.bits_per_pixel / 8);
+ int x2, y2;
+ unsigned long flags;
+ struct virtgpu_object *obj = gem_to_virtgpu_obj(fb->obj);
+
+	aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
+ width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
+ x = aligned_x;
+
+ if ((width <= 0) ||
+ (x + width > fb->base.width) ||
+ (y + height > fb->base.height)) {
+		printk(KERN_ERR "values out of range %d %d %dx%d %dx%d\n",
+		       x, y, width, height, fb->base.width, fb->base.height);
+ return -EINVAL;
+ }
+
+	/* if we are in an atomic context, just store the damage info;
+	   in_atomic() can't be tested once inside the spinlock */
+ if (in_atomic() || store)
+ store_for_later = true;
+
+ x2 = x + width - 1;
+ y2 = y + height - 1;
+
+ spin_lock_irqsave(&fb->dirty_lock, flags);
+
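+	/* merge the new rect with any still-pending damage: take the union */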
+ if (fb->y1 < y)
+ y = fb->y1;
+ if (fb->y2 > y2)
+ y2 = fb->y2;
+ if (fb->x1 < x)
+ x = fb->x1;
+ if (fb->x2 > x2)
+ x2 = fb->x2;
+
+ if (store_for_later) {
+ fb->x1 = x;
+ fb->x2 = x2;
+ fb->y1 = y;
+ fb->y2 = y2;
+ spin_unlock_irqrestore(&fb->dirty_lock, flags);
+ return 0;
+ }
+
+ fb->x1 = fb->y1 = INT_MAX;
+ fb->x2 = fb->y2 = 0;
+
+ spin_unlock_irqrestore(&fb->dirty_lock, flags);
+
+	{
+		uint32_t offset;
+		uint32_t w = x2 - x + 1;
+		uint32_t h = y2 - y + 1;
+
+		offset = (y * fb->base.pitches[0]) + x * bpp;
+
+		virtgpu_cmd_transfer_send_2d(vgdev, obj->hw_res_handle,
+					     offset, w, h, x, y);
+		virtgpu_cmd_resource_flush(vgdev, obj->hw_res_handle,
+					   w, h, x, y);
+	}
+
+ return 0;
+}
+
+static void virtgpu_fb_dirty_work(struct work_struct *work)
+{
+	struct delayed_work *delayed_work = to_delayed_work(work);
+	struct virtgpu_fbdev *vfbdev =
+		container_of(delayed_work, struct virtgpu_fbdev, work);
+	struct virtgpu_framebuffer *vgfb = &vfbdev->vgfb;
+
+	/* x2/y2 are stored inclusive, hence the +1 to get width/height */
+	virtgpu_dirty_update(&vfbdev->vgfb, false, vgfb->x1, vgfb->y1,
+			     vgfb->x2 - vgfb->x1 + 1,
+			     vgfb->y2 - vgfb->y1 + 1);
+}
+
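+/* fbcon acceleration hooks: draw through the sys_* helpers into the
+   shadow buffer, record the damaged rectangle, and kick the delayed
+   worker to flush it out to the host */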
+static void virtgpu_3d_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ struct virtgpu_fbdev *vfbdev = info->par;
+ sys_fillrect(info, rect);
+ virtgpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy, rect->width,
+ rect->height);
+ schedule_delayed_work(&vfbdev->work, VIRTGPU_FBCON_POLL_PERIOD);
+}
+
+static void virtgpu_3d_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ struct virtgpu_fbdev *vfbdev = info->par;
+ sys_copyarea(info, area);
+ virtgpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy,
+ area->width, area->height);
+ schedule_delayed_work(&vfbdev->work, VIRTGPU_FBCON_POLL_PERIOD);
+}
+
+static void virtgpu_3d_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct virtgpu_fbdev *vfbdev = info->par;
+ sys_imageblit(info, image);
+ virtgpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy,
+ image->width, image->height);
+ schedule_delayed_work(&vfbdev->work, VIRTGPU_FBCON_POLL_PERIOD);
+}
static struct fb_ops virtgpufb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
-// .fb_fillrect = virgl_3d_fillrect,
-/// .fb_copyarea = virgl_3d_copyarea,
-// .fb_imageblit = virgl_3d_imageblit,
+ .fb_fillrect = virtgpu_3d_fillrect,
+ .fb_copyarea = virtgpu_3d_copyarea,
+ .fb_imageblit = virtgpu_3d_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
@@ -30,13 +146,17 @@ static int virtgpufb_create(struct drm_fb_helper *helper,
struct virtgpu_fbdev *vfbdev =
container_of(helper, struct virtgpu_fbdev, helper);
struct drm_device *dev = helper->dev;
+ struct virtgpu_device *vgdev = dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd = {};
struct virtgpu_object *obj;
struct device *device = &dev->pdev->dev;
+ uint32_t resid, format, size;
int ret;
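+	/* the format mapping below has no 24bpp entry, so promote to 32bpp */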
+ if (sizes->surface_bpp == 24)
+ sizes->surface_bpp = 32;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
@@ -44,6 +164,35 @@ static int virtgpufb_create(struct drm_fb_helper *helper,
sizes->surface_depth);
+ if (mode_cmd.pixel_format == DRM_FORMAT_XRGB8888)
+ format = VIRGL_FORMAT_B8G8R8X8_UNORM;
+ else if (mode_cmd.pixel_format == DRM_FORMAT_ARGB8888)
+ format = VIRGL_FORMAT_B8G8R8A8_UNORM;
+ else if (mode_cmd.pixel_format == DRM_FORMAT_XRGB1555)
+ format = VIRGL_FORMAT_B5G5R5A1_UNORM;
+ else {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ obj = virtgpu_alloc_object(dev, size);
+ if (!obj) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = virtgpu_resource_id_get(vgdev, &resid);
+ if (ret)
+ goto fail;
+
+ obj->hw_res_handle = resid;
+
+ ret = virtgpu_cmd_create_resource(vgdev, resid,
+ format, mode_cmd.width, mode_cmd.height);
+ if (ret)
+ goto fail;
+
info = framebuffer_alloc(0, device);
if (!info) {
ret = -ENOMEM;
@@ -116,7 +265,7 @@ int virtgpu_fbdev_init(struct virtgpu_device *vgdev)
vgfbdev->vgdev = vgdev;
vgdev->vgfbdev = vgfbdev;
vgfbdev->helper.funcs = &virtgpu_fb_helper_funcs;
- // INIT_DELAYED_WORK(&vgfbdev->work, virtgpu_fb_dirty_work);
+ INIT_DELAYED_WORK(&vgfbdev->work, virtgpu_fb_dirty_work);
ret = drm_fb_helper_init(vgdev->ddev, &vgfbdev->helper,
1 /* num_crtc - VIRTGPU supports just 1 */,
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 5ac09015c7c7..005ea6a509ea 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -2,6 +2,33 @@
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
+#include "virtio_hw.h"
+
+int virtgpu_resource_id_get(struct virtgpu_device *vgdev, uint32_t *resid)
+{
+ int handle;
+ int idr_ret = -ENOMEM;
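+	/* two-step idr allocation: preallocate outside the lock, then take
+	   an id >= 1 under it; retry if a racing thread consumed the
+	   preallocated node (-EAGAIN) */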
+again:
+ if (idr_pre_get(&vgdev->resource_idr, GFP_KERNEL) == 0) {
+ goto fail;
+ }
+ spin_lock(&vgdev->resource_idr_lock);
+ idr_ret = idr_get_new_above(&vgdev->resource_idr, NULL, 1, &handle);
+ spin_unlock(&vgdev->resource_idr_lock);
+	if (idr_ret == -EAGAIN)
+		goto again;
+	if (idr_ret < 0)
+		goto fail;
+
+	*resid = handle;
+fail:
+ return idr_ret;
+}
+
+void virtgpu_resource_id_put(struct virtgpu_device *vgdev, uint32_t id)
+{
+ spin_lock(&vgdev->resource_idr_lock);
+ idr_remove(&vgdev->resource_idr, id);
+ spin_unlock(&vgdev->resource_idr_lock);
+}
void virtgpu_ctrl_ack(struct virtqueue *vq)
{
@@ -10,8 +37,8 @@ void virtgpu_ctrl_ack(struct virtqueue *vq)
schedule_work(&vgdev->dequeue_work);
}
-struct virtgpu_vbuffer *virtgpu_allocate_vbuf(struct virtgpu_device *vgdev,
- int size)
+static struct virtgpu_vbuffer *virtgpu_allocate_vbuf(struct virtgpu_device *vgdev,
+ int size)
{
struct virtgpu_vbuffer *vbuf;
@@ -28,6 +55,20 @@ fail:
return ERR_PTR(-ENOMEM);
}
+struct virtgpu_command *virtgpu_alloc_cmd(struct virtgpu_device *vgdev,
+ struct virtgpu_vbuffer **vbuffer_p)
+{
+ struct virtgpu_vbuffer *vbuf;
+
+ vbuf = virtgpu_allocate_vbuf(vgdev, sizeof(struct virtgpu_command));
+ if (IS_ERR(vbuf)) {
+ *vbuffer_p = NULL;
+ return ERR_CAST(vbuf);
+ }
+ *vbuffer_p = vbuf;
+ return (struct virtgpu_command *)vbuf->buf;
+}
+
static void free_vbuf(struct virtgpu_vbuffer *vbuf)
{
kfree(vbuf);
@@ -102,18 +143,152 @@ retry:
return ret;
}
+/* only create GEM objects for userspace and for long-lived objects;
+   should the transient queue objects just use dma_alloc'd pages? */
+
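+/* every virtgpu_cmd_* helper below follows the same pattern: allocate a
+   command-sized vbuffer, fill in one member of the command union, and
+   queue it on the control virtqueue */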
+/* create a basic resource */
int virtgpu_cmd_create_resource(struct virtgpu_device *vgdev,
uint32_t resource_id,
uint32_t format,
uint32_t width,
uint32_t height)
{
-
+ struct virtgpu_command *cmd_p;
+ struct virtgpu_vbuffer *vbuf;
+
+	cmd_p = virtgpu_alloc_cmd(vgdev, &vbuf);
+	if (IS_ERR(cmd_p))
+		return PTR_ERR(cmd_p);
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->type = VIRTGPU_CMD_RESOURCE_CREATE_2D;
+ cmd_p->u.resource_create_2d.resource_id = resource_id;
+ cmd_p->u.resource_create_2d.format = format;
+ cmd_p->u.resource_create_2d.width = width;
+ cmd_p->u.resource_create_2d.height = height;
+
+ virtgpu_queue_ctrl_buffer(vgdev, vbuf);
+
+ return 0;
+}
+
+int virtgpu_cmd_unref_resource(struct virtgpu_device *vgdev,
+ uint32_t resource_id)
+{
+ struct virtgpu_command *cmd_p;
+ struct virtgpu_vbuffer *vbuf;
+
+	cmd_p = virtgpu_alloc_cmd(vgdev, &vbuf);
+	if (IS_ERR(cmd_p))
+		return PTR_ERR(cmd_p);
+	memset(cmd_p, 0, sizeof(*cmd_p));
+ cmd_p->type = VIRTGPU_CMD_RESOURCE_UNREF;
+ cmd_p->u.resource_unref.resource_id = resource_id;
+
+ virtgpu_queue_ctrl_buffer(vgdev, vbuf);
+
+ return 0;
}
+/* we only want to attach/invalidate backing store for GEM objects, so
+   pass the GEM object and internally allocate pages to hold the sg
+   lists that are handed to the host */
int virtgpu_cmd_attach_status_page(struct virtgpu_device *vgdev,
uint64_t page_addr)
{
}
+
+
+int virtgpu_cmd_resource_inval_backing(struct virtgpu_device *vgdev,
+ uint32_t resource_id)
+{
+ struct virtgpu_command *cmd_p;
+ struct virtgpu_vbuffer *vbuf;
+
+	cmd_p = virtgpu_alloc_cmd(vgdev, &vbuf);
+	if (IS_ERR(cmd_p))
+		return PTR_ERR(cmd_p);
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->type = VIRTGPU_CMD_RESOURCE_INVAL_BACKING;
+ cmd_p->u.resource_inval_backing.resource_id = resource_id;
+
+ virtgpu_queue_ctrl_buffer(vgdev, vbuf);
+
+ return 0;
+}
+
+int virtgpu_cmd_set_scanout(struct virtgpu_device *vgdev,
+ uint32_t scanout_id, uint32_t resource_id,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y)
+{
+ struct virtgpu_command *cmd_p;
+ struct virtgpu_vbuffer *vbuf;
+
+	cmd_p = virtgpu_alloc_cmd(vgdev, &vbuf);
+	if (IS_ERR(cmd_p))
+		return PTR_ERR(cmd_p);
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->type = VIRTGPU_CMD_SET_SCANOUT;
+ cmd_p->u.set_scanout.resource_id = resource_id;
+ cmd_p->u.set_scanout.scanout_id = scanout_id;
+ cmd_p->u.set_scanout.width = width;
+ cmd_p->u.set_scanout.height = height;
+ cmd_p->u.set_scanout.x = x;
+ cmd_p->u.set_scanout.y = y;
+
+ virtgpu_queue_ctrl_buffer(vgdev, vbuf);
+
+ return 0;
+}
+
+int virtgpu_cmd_resource_flush(struct virtgpu_device *vgdev,
+ uint32_t resource_id,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y)
+{
+ struct virtgpu_command *cmd_p;
+ struct virtgpu_vbuffer *vbuf;
+
+	cmd_p = virtgpu_alloc_cmd(vgdev, &vbuf);
+	if (IS_ERR(cmd_p))
+		return PTR_ERR(cmd_p);
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->type = VIRTGPU_CMD_RESOURCE_FLUSH;
+ cmd_p->u.resource_flush.resource_id = resource_id;
+ cmd_p->u.resource_flush.width = width;
+ cmd_p->u.resource_flush.height = height;
+ cmd_p->u.resource_flush.x = x;
+ cmd_p->u.resource_flush.y = y;
+
+ virtgpu_queue_ctrl_buffer(vgdev, vbuf);
+
+ return 0;
+}
+
+int virtgpu_cmd_transfer_send_2d(struct virtgpu_device *vgdev,
+ uint32_t resource_id, uint32_t offset,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y)
+{
+ struct virtgpu_command *cmd_p;
+ struct virtgpu_vbuffer *vbuf;
+
+	cmd_p = virtgpu_alloc_cmd(vgdev, &vbuf);
+	if (IS_ERR(cmd_p))
+		return PTR_ERR(cmd_p);
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->type = VIRTGPU_CMD_TRANSFER_SEND_2D;
+ cmd_p->u.transfer_send_2d.resource_id = resource_id;
+ cmd_p->u.transfer_send_2d.offset = offset;
+ cmd_p->u.transfer_send_2d.width = width;
+ cmd_p->u.transfer_send_2d.height = height;
+ cmd_p->u.transfer_send_2d.x = x;
+ cmd_p->u.transfer_send_2d.y = y;
+
+ virtgpu_queue_ctrl_buffer(vgdev, vbuf);
+
+ return 0;
+}
+
+int virtgpu_cmd_resource_attach_backing(struct virtgpu_device *vgdev)
+{
+	/* TODO: not wired up yet */
+	return -ENOSYS;
+}
diff --git a/drivers/gpu/drm/virtio/virtio_hw.h b/drivers/gpu/drm/virtio/virtio_hw.h
index 0929e0a90617..0029b3da5d0b 100644
--- a/drivers/gpu/drm/virtio/virtio_hw.h
+++ b/drivers/gpu/drm/virtio/virtio_hw.h
@@ -16,11 +16,11 @@ enum virtgpu_ctrl_cmd {
};
struct virtgpu_hw_status_page {
- uint64_t fence_id;
uint32_t cursor_x, cursor_y;
uint32_t cursor_hot_x, cursor_hot_y;
uint32_t cursor_id;
uint32_t error_state;
+ uint64_t fence_id;
};
struct virtgpu_attach_status_page {
@@ -48,6 +48,14 @@ struct virtgpu_set_scanout {
uint32_t y;
};
+struct virtgpu_resource_flush {
+ uint32_t resource_id;
+ uint32_t width;
+ uint32_t height;
+ uint32_t x;
+ uint32_t y;
+};
+
/* simple transfer send */
struct virtgpu_transfer_send_2d {
uint32_t resource_id;
@@ -86,16 +94,15 @@ struct virtgpu_display_info {
} pmodes[VIRTGPU_MAX_SCANOUTS];
};
-#define VIRTGPU_COMMAND_EMIT_FENCE (1 << 0)
-
struct virtgpu_command {
uint32_t type;
uint32_t flags;
- uint64_t fence;
+ uint64_t rsvd;
union virtgpu_cmds {
struct virtgpu_attach_status_page attach_status_page;
struct virtgpu_resource_create_2d resource_create_2d;
struct virtgpu_resource_unref resource_unref;
+ struct virtgpu_resource_flush resource_flush;
struct virtgpu_set_scanout set_scanout;
struct virtgpu_transfer_send_2d transfer_send_2d;
struct virtgpu_resource_attach_backing resource_attach_backing;