author		Dave Airlie <airlied@redhat.com>	2012-12-21 01:52:00 +1000
committer	Dave Airlie <airlied@redhat.com>	2012-12-21 01:52:00 +1000
commit		d8b433168fd40c5a7e5545dc57d9cc738551276d (patch)
tree		0d33913aa8b1bf08df5bc1f06ecf253a091a0333
parent		827c8e95f6d460c9f0f387f85b6e39d784164138 (diff)

dma-buf: lockless my ass (prime-todo)
-rw-r--r--	drivers/gpu/drm/drm_gem.c			|  5
-rw-r--r--	drivers/gpu/drm/drm_prime.c			| 33
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_dmabuf.c	| 19
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_dmabuf.c		| 27
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_prime.c		| 22
-rw-r--r--	drivers/gpu/drm/radeon/radeon_prime.c		| 19
-rw-r--r--	include/drm/drmP.h				| 14

7 files changed, 76 insertions(+), 63 deletions(-)
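
In outline, the hunks below move protection of obj->dma_buf from the per-file prime.lock to a new device-wide dev->prime_lock, add a common .release callback (drm_gem_prime_release) that clears obj->dma_buf under that lock, and convert every driver to store the base GEM object in dma_buf->priv. A condensed sketch of the lock nesting drm_gem_prime_handle_to_fd() ends up with (error paths elided; not a verbatim excerpt of the patch):

/* dev->prime_lock (outer) guards obj->dma_buf for the whole device;
 * file_priv->prime.lock (inner) still guards the per-file handle list. */
static int prime_handle_to_fd_sketch(struct drm_device *dev,
				     struct drm_file *file_priv,
				     struct drm_gem_object *obj,
				     uint32_t handle, uint32_t flags,
				     int *prime_fd)
{
	int ret;

	mutex_lock(&dev->prime_lock);
	if (!obj->dma_buf)
		obj->dma_buf = dev->driver->gem_prime_export(dev, obj, flags);

	mutex_lock(&file_priv->prime.lock);
	ret = drm_prime_add_exported_buf_handle(&file_priv->prime,
						obj->dma_buf, handle);
	mutex_unlock(&file_priv->prime.lock);

	if (!ret)
		*prime_fd = dma_buf_fd(obj->dma_buf, flags);
	mutex_unlock(&dev->prime_lock);
	return ret;
}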
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index d85d47a4174f..6baf0b62126a 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -94,7 +94,8 @@ drm_gem_init(struct drm_device *dev)
spin_lock_init(&dev->object_name_lock);
idr_init(&dev->object_name_idr);
-
+ mutex_init(&dev->prime_lock);
+
mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
if (!mm) {
DRM_ERROR("out of memory\n");
@@ -204,10 +205,12 @@ EXPORT_SYMBOL(drm_gem_object_alloc);
static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
+ mutex_lock(&obj->dev->prime_lock);
if (obj->dma_buf) {
drm_prime_remove_buf_handle(&filp->prime,
obj->dma_buf);
}
+ mutex_unlock(&obj->dev->prime_lock);
}
/**
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 405e27c17f22..bcc9cd205cb4 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -80,7 +80,7 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
if (!obj)
return -ENOENT;
- mutex_lock(&file_priv->prime.lock);
+ mutex_lock(&dev->prime_lock);
/* re-export the original imported object */
if (obj->dma_buf)
goto out_have_obj;
@@ -98,6 +98,7 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
/* if we've exported this buffer then cheat and add it to the import list
* so we get the correct handle back
*/
+ mutex_lock(&file_priv->prime.lock);
ret = drm_prime_add_exported_buf_handle(&file_priv->prime,
obj->dma_buf, handle);
if (ret) {
@@ -105,11 +106,14 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
}
*prime_fd = dma_buf_fd(buf, flags);
mutex_unlock(&file_priv->prime.lock);
+ mutex_unlock(&obj->dev->prime_lock);
+
return 0;
out_have_obj:
get_dma_buf(obj->dma_buf);
+ mutex_lock(&file_priv->prime.lock);
/* we should have a buf handle for this case */
{
uint32_t exp_handle;
@@ -122,10 +126,12 @@ out_have_obj:
goto out;
}
}
+ mutex_unlock(&file_priv->prime.lock);
+
*prime_fd = dma_buf_fd(obj->dma_buf, flags);
out:
drm_gem_object_unreference_unlocked(obj);
- mutex_unlock(&file_priv->prime.lock);
+ mutex_unlock(&obj->dev->prime_lock);
return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
@@ -141,10 +147,11 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
if (IS_ERR(dma_buf))
return PTR_ERR(dma_buf);
+ mutex_lock(&dev->prime_lock);
mutex_lock(&file_priv->prime.lock);
-
ret = drm_prime_lookup_buf_handle(&file_priv->prime,
dma_buf, handle);
+ mutex_unlock(&file_priv->prime.lock);
if (!ret) {
ret = 0;
goto out_put;
@@ -162,14 +169,15 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
if (ret)
goto out_put;
+ mutex_lock(&file_priv->prime.lock);
ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
dma_buf, *handle);
+ mutex_unlock(&file_priv->prime.lock);
if (ret)
goto fail;
obj->dma_buf = dma_buf;
-
- mutex_unlock(&file_priv->prime.lock);
+ mutex_unlock(&dev->prime_lock);
return 0;
fail:
@@ -179,7 +187,7 @@ fail:
drm_gem_object_handle_unreference_unlocked(obj);
out_put:
dma_buf_put(dma_buf);
- mutex_unlock(&file_priv->prime.lock);
+ mutex_unlock(&dev->prime_lock);
return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
@@ -290,11 +298,13 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
+
/* helper function to cleanup a GEM/prime object */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
struct dma_buf_attachment *attach;
struct dma_buf *dma_buf;
+
attach = obj->import_attach;
if (sg)
dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
@@ -381,3 +391,14 @@ void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, str
mutex_unlock(&prime_fpriv->lock);
}
EXPORT_SYMBOL(drm_prime_remove_buf_handle);
+
+void drm_gem_prime_release(struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+
+ mutex_lock(&obj->dev->prime_lock);
+ obj->dma_buf = NULL;
+ mutex_unlock(&obj->dev->prime_lock);
+ drm_gem_object_unreference_unlocked(obj);
+}
+EXPORT_SYMBOL(drm_gem_prime_release);
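
The new drm_gem_prime_release() helper assumes dma_buf->priv holds the base struct drm_gem_object, which is why every driver converted below passes the GEM object itself (not its driver-specific buffer type) to dma_buf_export(). A minimal sketch of a converted driver, using a hypothetical driver "foo" (the foo_* names are illustrative, not part of this patch):

static const struct dma_buf_ops foo_dmabuf_ops = {
	/* map_dma_buf, unmap_dma_buf, kmap, mmap, ... stay driver-specific */
	.release = drm_gem_prime_release,	/* shared helper added above */
};

static struct dma_buf *foo_gem_prime_export(struct drm_device *dev,
					    struct drm_gem_object *gem_obj,
					    int flags)
{
	/* the base GEM object, not the driver wrapper, becomes priv */
	return dma_buf_export(gem_obj, &foo_dmabuf_ops,
			      gem_obj->size, flags);
}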
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index d4bd6debcdcd..6504d527029c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -76,7 +76,8 @@ static struct sg_table *
enum dma_data_direction dir)
{
struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
- struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
+ struct drm_gem_object *gobj = attach->dmabuf->priv;
+ struct exynos_drm_gem_obj *gem_obj = to_exynos_gem_obj(gobj);
struct drm_device *dev = gem_obj->base.dev;
struct exynos_drm_gem_buf *buf;
struct scatterlist *rd, *wr;
@@ -145,15 +146,6 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
/* Nothing to do. */
}
-static void exynos_dmabuf_release(struct dma_buf *dmabuf)
-{
- struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;
-
- DRM_DEBUG_PRIME("%s\n", __FILE__);
-
- drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
-}
-
static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
unsigned long page_num)
{
@@ -199,7 +191,7 @@ static struct dma_buf_ops exynos_dmabuf_ops = {
.kunmap = exynos_gem_dmabuf_kunmap,
.kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic,
.mmap = exynos_gem_dmabuf_mmap,
- .release = exynos_dmabuf_release,
+ .release = drm_gem_prime_release,
};
struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
@@ -226,9 +218,8 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
/* is this one of our own objects? */
if (dma_buf->ops == &exynos_dmabuf_ops) {
struct drm_gem_object *obj;
-
- exynos_gem_obj = dma_buf->priv;
- obj = &exynos_gem_obj->base;
+ obj = dma_buf->priv;
+ exynos_gem_obj = to_exynos_gem_obj(obj);
/* is it from our device? */
if (obj->dev == drm_dev) {
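
The exynos hunks above establish the pattern the remaining drivers in this series repeat: every dma_buf callback now reads dma_buf->priv as the base GEM object and converts back to the driver's buffer type with its container_of helper. Sketched generically below; to_foo_bo() stands in for to_exynos_gem_obj(), to_intel_bo(), nouveau_gem_object() or gem_to_radeon_bo(), and foo_bo_kmap() is hypothetical:

static void *foo_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	/* priv is always the base GEM object now ... */
	struct drm_gem_object *gobj = dma_buf->priv;
	/* ... so recover the driver's buffer type explicitly */
	struct foo_bo *bo = to_foo_bo(gobj);

	return foo_bo_kmap(bo);
}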
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index ee9c002bb628..dc20e305a160 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -30,7 +30,8 @@
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction dir)
{
- struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+ struct drm_gem_object *gobj = attachment->dmabuf->priv;
+ struct drm_i915_gem_object *obj = to_intel_bo(gobj);
struct sg_table *st;
struct scatterlist *src, *dst;
int ret, i;
@@ -90,17 +91,11 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
kfree(sg);
}
-static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
-{
- struct drm_i915_gem_object *obj = dma_buf->priv;
-
- drm_gem_object_unreference_unlocked(&obj->base);
-}
-
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
- struct drm_i915_gem_object *obj = dma_buf->priv;
- struct drm_device *dev = obj->base.dev;
+ struct drm_gem_object *gobj = dma_buf->priv;
+ struct drm_i915_gem_object *obj = to_intel_bo(gobj);
+ struct drm_device *dev = gobj->dev;
struct scatterlist *sg;
struct page **pages;
int ret, i;
@@ -146,7 +141,8 @@ error:
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
- struct drm_i915_gem_object *obj = dma_buf->priv;
+ struct drm_gem_object *gobj = dma_buf->priv;
+ struct drm_i915_gem_object *obj = to_intel_bo(gobj);
struct drm_device *dev = obj->base.dev;
int ret;
@@ -189,7 +185,8 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
{
- struct drm_i915_gem_object *obj = dma_buf->priv;
+ struct drm_gem_object *gobj = dma_buf->priv;
+ struct drm_i915_gem_object *obj = to_intel_bo(gobj);
struct drm_device *dev = obj->base.dev;
int ret;
bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
@@ -206,7 +203,7 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size
static const struct dma_buf_ops i915_dmabuf_ops = {
.map_dma_buf = i915_gem_map_dma_buf,
.unmap_dma_buf = i915_gem_unmap_dma_buf,
- .release = i915_gem_dmabuf_release,
+ .release = drm_gem_prime_release,
.kmap = i915_gem_dmabuf_kmap,
.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
.kunmap = i915_gem_dmabuf_kunmap,
@@ -222,7 +219,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
- return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
+ return dma_buf_export(gem_obj, &i915_dmabuf_ops, obj->base.size, flags);
}
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
@@ -259,7 +256,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
/* is this one of our own objects? */
if (dma_buf->ops == &i915_dmabuf_ops) {
- obj = dma_buf->priv;
+ obj = to_intel_bo(dma_buf->priv);
/* is it from our device? */
if (obj->base.dev == dev) {
/*
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 377452e3977e..1b45b60da9b2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -32,7 +32,8 @@
static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction dir)
{
- struct nouveau_bo *nvbo = attachment->dmabuf->priv;
+ struct drm_gem_object *obj = attachment->dmabuf->priv;
+ struct nouveau_bo *nvbo = nouveau_gem_object(obj);
struct drm_device *dev = nvbo->gem->dev;
int npages = nvbo->bo.num_pages;
struct sg_table *sg;
@@ -53,13 +54,6 @@ static void nouveau_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
kfree(sg);
}
-static void nouveau_gem_dmabuf_release(struct dma_buf *dma_buf)
-{
- struct nouveau_bo *nvbo = dma_buf->priv;
-
- drm_gem_object_unreference_unlocked(nvbo->gem);
-}
-
static void *nouveau_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
return NULL;
@@ -86,7 +80,8 @@ static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct
static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
{
- struct nouveau_bo *nvbo = dma_buf->priv;
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct nouveau_bo *nvbo = nouveau_gem_object(obj);
struct drm_device *dev = nvbo->gem->dev;
int ret;
@@ -110,7 +105,8 @@ out_unlock:
static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
- struct nouveau_bo *nvbo = dma_buf->priv;
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct nouveau_bo *nvbo = nouveau_gem_object(obj);
struct drm_device *dev = nvbo->gem->dev;
mutex_lock(&dev->struct_mutex);
@@ -124,7 +120,7 @@ static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
static const struct dma_buf_ops nouveau_dmabuf_ops = {
.map_dma_buf = nouveau_gem_map_dma_buf,
.unmap_dma_buf = nouveau_gem_unmap_dma_buf,
- .release = nouveau_gem_dmabuf_release,
+ .release = drm_gem_prime_release,
.kmap = nouveau_gem_kmap,
.kmap_atomic = nouveau_gem_kmap_atomic,
.kunmap = nouveau_gem_kunmap,
@@ -183,10 +179,12 @@ struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
struct dma_buf_attachment *attach;
struct sg_table *sg;
struct nouveau_bo *nvbo;
+ struct drm_gem_object *obj;
int ret;
if (dma_buf->ops == &nouveau_dmabuf_ops) {
- nvbo = dma_buf->priv;
+ obj = dma_buf->priv;
+ nvbo = nouveau_gem_object(obj);
if (nvbo->gem) {
if (nvbo->gem->dev == dev) {
drm_gem_object_reference(nvbo->gem);
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index f9a31688c123..577e848b6606 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -33,7 +33,7 @@
static struct sg_table *radeon_gem_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction dir)
{
- struct radeon_bo *bo = attachment->dmabuf->priv;
+ struct radeon_bo *bo = gem_to_radeon_bo(attachment->dmabuf->priv);
struct drm_device *dev = bo->rdev->ddev;
int npages = bo->tbo.num_pages;
struct sg_table *sg;
@@ -54,13 +54,6 @@ static void radeon_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
kfree(sg);
}
-static void radeon_gem_dmabuf_release(struct dma_buf *dma_buf)
-{
- struct radeon_bo *bo = dma_buf->priv;
-
- drm_gem_object_unreference_unlocked(&bo->gem_base);
-}
-
static void *radeon_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
return NULL;
@@ -87,7 +80,7 @@ static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct
static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
{
- struct radeon_bo *bo = dma_buf->priv;
+ struct radeon_bo *bo = gem_to_radeon_bo(dma_buf->priv);
struct drm_device *dev = bo->rdev->ddev;
int ret;
@@ -111,7 +104,7 @@ out_unlock:
static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
- struct radeon_bo *bo = dma_buf->priv;
+ struct radeon_bo *bo = gem_to_radeon_bo(dma_buf->priv);
struct drm_device *dev = bo->rdev->ddev;
mutex_lock(&dev->struct_mutex);
@@ -124,7 +117,7 @@ static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
static const struct dma_buf_ops radeon_dmabuf_ops = {
.map_dma_buf = radeon_gem_map_dma_buf,
.unmap_dma_buf = radeon_gem_unmap_dma_buf,
- .release = radeon_gem_dmabuf_release,
+ .release = drm_gem_prime_release,
.kmap = radeon_gem_kmap,
.kmap_atomic = radeon_gem_kmap_atomic,
.kunmap = radeon_gem_kunmap,
@@ -175,7 +168,7 @@ struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
return ERR_PTR(ret);
}
radeon_bo_unreserve(bo);
- return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
+ return dma_buf_export(obj, &radeon_dmabuf_ops, obj->size, flags);
}
struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
@@ -187,7 +180,7 @@ struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
int ret;
if (dma_buf->ops == &radeon_dmabuf_ops) {
- bo = dma_buf->priv;
+ bo = gem_to_radeon_bo(dma_buf->priv);
if (bo->gem_base.dev == dev) {
drm_gem_object_reference(&bo->gem_base);
dma_buf_put(dma_buf);
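
All four drivers keep the same self-import fast path, now phrased in terms of the base GEM object: recognize our own dma_buf by its ops table, and if it belongs to this device, take a reference instead of attaching. A generic sketch under the same hypothetical foo_* naming (the fallback attach path for foreign buffers is elided):

struct drm_gem_object *foo_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	/* one of our own exports? recognize it by the ops table */
	if (dma_buf->ops == &foo_dmabuf_ops) {
		struct drm_gem_object *obj = dma_buf->priv;

		/* from this device too? then just take a reference */
		if (obj->dev == dev) {
			drm_gem_object_reference(obj);
			dma_buf_put(dma_buf);
			return obj;
		}
	}

	/* foreign buffer: attach and wrap it in a new GEM object (elided) */
	return ERR_PTR(-ENODEV);
}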
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 0d584dd9ba3d..376852381221 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -418,8 +418,8 @@ struct drm_pending_event {
void (*destroy)(struct drm_pending_event *event);
};
-/* initial implementaton using a linked list - todo hashtab */
struct drm_prime_file_private {
+ /* the list of dma_buf/handle pairs is protected by this lock */
struct list_head head;
struct mutex lock;
};
@@ -670,6 +670,7 @@ struct drm_gem_object {
void *driver_private;
/* dma buf attach to this GEM object */
+ /* this is protected by the device prime_lock */
struct dma_buf *dma_buf;
/* dma buf attachment backing this object */
@@ -1194,6 +1195,15 @@ struct drm_device {
/*@{ */
spinlock_t object_name_lock;
struct idr object_name_idr;
+
+ /**
+ * prime_lock - protects dma buf state of exported GEM objects
+ *
+ * Specifically obj->dma_buf, but this can be used by drivers for
+ * other things.
+ */
+ struct mutex prime_lock;
+
/*@} */
int switch_power_state;
@@ -1555,7 +1565,7 @@ extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **
dma_addr_t *addrs, int max_pages);
extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages);
extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
-
+extern void drm_gem_prime_release(struct dma_buf *dma_buf);
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);