author     Thomas Hellstrom <thomas@tungstengraphics.com>  2006-03-24 16:17:53 +0000
committer  Thomas Hellstrom <thomas@tungstengraphics.com>  2006-03-24 16:17:53 +0000
commit     2c1999a5f8dbc67c94171034714a5825f22e41dc (patch)
tree       fb9e7db5665865d5e3a59d524ec64f0049acfaaf
parent     202e509601cf481eefaf5db266cca09cf8e56802 (diff)
Delayed buffer destruction bugfixes; pinned buffer destruction fixes.
-rw-r--r--   linux-core/drm_drv.c    |  15
-rw-r--r--   linux-core/drm_fops.c   |  45
-rw-r--r--   linux-core/drm_ttm.c    | 151
-rw-r--r--   linux-core/drm_ttm.h    |   3
-rw-r--r--   shared-core/i915_dma.c  |   8
-rw-r--r--   shared-core/i915_irq.c  |   4
6 files changed, 133 insertions, 93 deletions
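
In outline, what the patch changes: a buffer region the hardware may still be reading cannot be torn down immediately, so instead of blocking, the destroy path stamps it with a fence, parks it on a per-manager "delayed" list, and a later pass retries the destruction once the fence has retired. The sketch below shows only the shape of that deferral pattern in plain user-space C; every name in it (struct region, fence_retired(), destroy_delayed(), ...) is hypothetical and is not the DRM API.

#include <errno.h>
#include <stdbool.h>

/* Hypothetical stand-ins for a fenced memory region and the hardware's
 * retired-sequence counter; not the real drm_ttm structures. */
struct region {
	struct region *next;
	unsigned int fence;		/* sequence number guarding this region */
	bool fence_valid;
};

static unsigned int hw_retired_seq;	/* last sequence the hardware completed */
static struct region *delayed_head;	/* regions we could not free yet */

static bool fence_retired(unsigned int fence)
{
	return (int)(hw_retired_seq - fence) >= 0;
}

/* Free the backing resources of a region (stubbed out here). */
static void free_region_resources(struct region *r)
{
	(void)r;
}

/* Try to destroy a region.  If the hardware may still use it, park it on
 * the delayed list instead of blocking, and tell the caller with -EBUSY. */
static int destroy_region(struct region *r)
{
	if (r->fence_valid && !fence_retired(r->fence)) {
		r->next = delayed_head;
		delayed_head = r;
		return -EBUSY;
	}
	free_region_resources(r);
	return 0;
}

/* Retry pass, run later (e.g. from lastclose or before new allocations):
 * destroy every parked region whose fence has retired by now. */
static void destroy_delayed(void)
{
	struct region **p = &delayed_head;

	while (*p) {
		struct region *r = *p;

		if (r->fence_valid && !fence_retired(r->fence)) {
			p = &r->next;		/* still busy, keep it parked */
			continue;
		}
		*p = r->next;			/* unlink, then really free */
		free_region_resources(r);
	}
}

The drm_ttm.c hunks below follow the same shape: remove_ttm_region() refuses with -EBUSY while the fence is outstanding, drm_destroy_ttm_region() parks the entry on mm->delayed, and drm_ttm_destroy_delayed() is the retry pass.
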
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index 85991a97..6cd99a09 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -143,11 +143,23 @@ int drm_lastclose(drm_device_t * dev)
int i;
DRM_DEBUG("\n");
+ /*
+ * Take down MM before driver destroys data needed for hooks.
+ */
+
+ if (dev->mm_driver) {
+ down(&dev->ttm_sem);
+ down(&dev->struct_sem);
+ drm_mm_do_takedown(dev);
+ up(&dev->struct_sem);
+ up(&dev->ttm_sem);
+ }
if (dev->driver->lastclose)
dev->driver->lastclose(dev);
DRM_DEBUG("driver lastclose completed\n");
+
if (dev->unique) {
drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
dev->unique=NULL;
@@ -211,9 +223,6 @@ int drm_lastclose(drm_device_t * dev)
dev->vmalist = NULL;
}
- if (dev->mm_driver) {
- drm_mm_do_takedown(dev);
- }
if (dev->maplist) {
while (!list_empty(&dev->maplist->head)) {
diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c
index af587e38..9e9ba994 100644
--- a/linux-core/drm_fops.c
+++ b/linux-core/drm_fops.c
@@ -317,6 +317,24 @@ int drm_fasync(int fd, struct file *filp, int on)
}
EXPORT_SYMBOL(drm_fasync);
+
+static void drm_prefence_ttm_locked(drm_file_t *priv, drm_device_t *dev)
+{
+ drm_map_list_t *entry;
+ drm_ttm_backend_list_t *bentry;
+
+ if (dev->mm_driver) {
+ list_for_each_entry(entry, &priv->ttms, head) {
+ drm_ttm_fence_before_destroy((drm_ttm_t *)
+ entry->map->offset);
+ }
+ list_for_each_entry(bentry, &priv->anon_ttm_regs, head) {
+ drm_fence_unfenced_region(bentry);
+ }
+ }
+}
+
+
/**
* Release file.
*
@@ -362,18 +380,8 @@ int drm_release(struct inode *inode, struct file *filp)
if (dev->driver->reclaim_buffers_locked)
dev->driver->reclaim_buffers_locked(dev, filp);
- /*
- * FIXME: These need to go away.
- */
+ drm_prefence_ttm_locked(priv, dev);
- if (dev->mm_driver) {
- uint32_t fence = dev->mm_driver->emit_fence(dev, 0);
- unsigned long end = jiffies + DRM_HZ;
- BUG_ON(!_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ));
-
- while(!time_after_eq(jiffies, end) &&
- -EINTR == dev->mm_driver->wait_fence(dev, 0, fence));
- }
drm_lock_free(dev, &dev->lock.hw_lock->lock,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
@@ -415,19 +423,8 @@ int drm_release(struct inode *inode, struct file *filp)
if (dev->driver->reclaim_buffers_locked)
dev->driver->reclaim_buffers_locked(dev, filp);
- /*
- * FIXME: These need to go away.
- */
+ drm_prefence_ttm_locked(priv, dev);
-
- if (dev->mm_driver) {
- uint32_t fence = dev->mm_driver->emit_fence(dev, 0);
- unsigned long end = jiffies + DRM_HZ;
- BUG_ON(!_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ));
- while(!time_after_eq(jiffies, end) &&
- -EINTR == dev->mm_driver->wait_fence(dev, 0, fence));
- }
-
drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT);
}
@@ -494,7 +491,7 @@ int drm_release(struct inode *inode, struct file *filp)
if (!drm_find_ht_item(&dev->maphash, entry, &hash)) {
drm_remove_ht_val(&dev->maphash, hash);
}
- if (!drm_destroy_ttm((drm_ttm_t *) entry->map->offset))
+ if (-EBUSY != drm_destroy_ttm((drm_ttm_t *) entry->map->offset))
drm_free(entry->map, sizeof(*entry->map),
DRM_MEM_MAPS);
drm_free(entry, sizeof(*entry), DRM_MEM_MAPS);
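
The drm_release() hunks above drop the old emit-a-fence-and-spin-for-up-to-a-second loop (the code marked FIXME) in favour of drm_prefence_ttm_locked(): the closing file's regions merely get a fence stamped on them, and the delayed-destruction machinery reclaims them once the hardware is done. The -EBUSY test at the end is the caller-side half of that contract: the map may only be freed when drm_destroy_ttm() really tore the object down. A hedged sketch of that caller convention, with both helpers reduced to stubs (the names are illustrative, not the real DRM entry points):

#include <errno.h>
#include <stdlib.h>

struct ttm {
	int busy_regions;	/* >0 while fenced regions are still parked */
};

struct ttm_map {
	struct ttm *ttm;	/* hypothetical wrapper, akin to the map entry */
};

/* Stamp a fence on every region that does not carry one yet (stub). */
static void fence_before_destroy(struct ttm *ttm)
{
	(void)ttm;
}

/* Tear the object down; -EBUSY means some regions were parked on the
 * delayed list and the object must stay alive until they retire (stub). */
static int destroy_ttm(struct ttm *ttm)
{
	return ttm->busy_regions ? -EBUSY : 0;
}

/* Release path: no busy-waiting.  Fence what is unfenced, try to destroy,
 * and free the wrapper only if the object is really gone. */
static void release_ttm_map(struct ttm_map *map)
{
	fence_before_destroy(map->ttm);

	if (destroy_ttm(map->ttm) != -EBUSY)
		free(map);
	/* on -EBUSY the wrapper stays around; the delayed pass finishes the
	 * teardown after the hardware has passed the fence */
}
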
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index 4efa740f..39273d99 100644
--- a/linux-core/drm_ttm.c
+++ b/linux-core/drm_ttm.c
@@ -155,7 +155,7 @@ static void drm_change_protection(struct vm_area_struct *vma,
*/
typedef struct p_mm_entry {
- struct list_head head;
+ struct list_head head;
struct mm_struct *mm;
atomic_t refcount;
} p_mm_entry_t;
@@ -167,7 +167,6 @@ typedef struct drm_val_action {
int validated;
} drm_val_action_t;
-
/*
* We may be manipulating other processes page tables, so for each TTM, keep track of
* which mm_structs are currently mapping the ttm so that we can take the appropriate
@@ -175,8 +174,7 @@ typedef struct drm_val_action {
* process' buffers.
*/
-
-int drm_ttm_add_mm_to_list(drm_ttm_t *ttm, struct mm_struct *mm)
+int drm_ttm_add_mm_to_list(drm_ttm_t * ttm, struct mm_struct *mm)
{
p_mm_entry_t *entry, *n_entry;
@@ -184,7 +182,7 @@ int drm_ttm_add_mm_to_list(drm_ttm_t *ttm, struct mm_struct *mm)
if (mm == entry->mm) {
atomic_inc(&entry->refcount);
return 0;
- } else if ((unsigned long) mm < (unsigned long) entry->mm);
+ } else if ((unsigned long)mm < (unsigned long)entry->mm) ;
}
n_entry = drm_alloc(sizeof(*n_entry), DRM_MEM_MM);
@@ -197,12 +195,13 @@ int drm_ttm_add_mm_to_list(drm_ttm_t *ttm, struct mm_struct *mm)
atomic_set(&n_entry->refcount, 0);
atomic_inc(&ttm->shared_count);
ttm->mm_list_seq++;
-
+
list_add_tail(&n_entry->head, &entry->head);
+
return 0;
}
-void drm_ttm_delete_mm(drm_ttm_t *ttm, struct mm_struct *mm)
+void drm_ttm_delete_mm(drm_ttm_t * ttm, struct mm_struct *mm)
{
p_mm_entry_t *entry, *n;
list_for_each_entry_safe(entry, n, &ttm->p_mm_list, head) {
@@ -214,16 +213,15 @@ void drm_ttm_delete_mm(drm_ttm_t *ttm, struct mm_struct *mm)
ttm->mm_list_seq++;
}
return;
- }
+ }
}
BUG_ON(TRUE);
}
-
-static void drm_ttm_lock_mm(drm_ttm_t *ttm, int mm_sem, int page_table)
+static void drm_ttm_lock_mm(drm_ttm_t * ttm, int mm_sem, int page_table)
{
p_mm_entry_t *entry;
-
+
list_for_each_entry(entry, &ttm->p_mm_list, head) {
if (mm_sem) {
down_write(&entry->mm->mmap_sem);
@@ -234,11 +232,11 @@ static void drm_ttm_lock_mm(drm_ttm_t *ttm, int mm_sem, int page_table)
}
}
-static void drm_ttm_unlock_mm(drm_ttm_t *ttm, int mm_sem, int page_table)
+static void drm_ttm_unlock_mm(drm_ttm_t * ttm, int mm_sem, int page_table)
{
- p_mm_entry_t *entry;
-
- list_for_each_entry(entry, &ttm->p_mm_list, head) {
+ p_mm_entry_t *entry;
+
+ list_for_each_entry(entry, &ttm->p_mm_list, head) {
if (page_table) {
spin_unlock(&entry->mm->page_table_lock);
}
@@ -247,7 +245,6 @@ static void drm_ttm_unlock_mm(drm_ttm_t *ttm, int mm_sem, int page_table)
}
}
}
-
static int ioremap_vmas(drm_ttm_t * ttm, unsigned long page_offset,
unsigned long num_pages, unsigned long aper_offset)
@@ -255,11 +252,10 @@ static int ioremap_vmas(drm_ttm_t * ttm, unsigned long page_offset,
struct list_head *list;
int ret = 0;
-
list_for_each(list, &ttm->vma_list->head) {
drm_ttm_vma_list_t *entry =
list_entry(list, drm_ttm_vma_list_t, head);
-
+
ret = io_remap_pfn_range(entry->vma,
entry->vma->vm_start +
(page_offset << PAGE_SHIFT),
@@ -341,6 +337,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
}
drm_free(ttm->be_list, sizeof(*ttm->be_list), DRM_MEM_MAPS);
+ ttm->be_list = NULL;
}
if (atomic_read(&ttm->unfinished_regions) > 0) {
@@ -366,9 +363,11 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
}
global_flush_tlb();
vfree(ttm->pages);
+ ttm->pages = NULL;
}
if (ttm->page_flags) {
vfree(ttm->page_flags);
+ ttm->page_flags = NULL;
}
if (ttm->vma_list) {
@@ -380,6 +379,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
drm_free(entry, sizeof(*entry), DRM_MEM_MAPS);
}
drm_free(ttm->vma_list, sizeof(*ttm->vma_list), DRM_MEM_MAPS);
+ ttm->vma_list = NULL;
}
drm_free(ttm, sizeof(*ttm), DRM_MEM_MAPS);
@@ -462,60 +462,61 @@ drm_ttm_t *drm_init_ttm(struct drm_device * dev, unsigned long size)
* it after relocking dev->struc_sem.
*/
-
-static int drm_ttm_lock_mmap_sem(drm_ttm_t *ttm)
+static int drm_ttm_lock_mmap_sem(drm_ttm_t * ttm)
{
struct mm_struct **mm_list = NULL, **mm_list_p;
uint32_t list_seq;
- uint32_t cur_count,shared_count;
+ uint32_t cur_count, shared_count;
p_mm_entry_t *entry;
unsigned i;
-
+
cur_count = 0;
list_seq = ttm->mm_list_seq;
+ shared_count = atomic_read(&ttm->shared_count);
do {
- shared_count = atomic_read(&ttm->shared_count);
if (shared_count > cur_count) {
- if (mm_list)
- drm_free(mm_list, sizeof(*mm_list)*cur_count, DRM_MEM_MM);
+ if (mm_list)
+ drm_free(mm_list, sizeof(*mm_list) * cur_count,
+ DRM_MEM_MM);
cur_count = shared_count + 10;
- mm_list = drm_alloc(sizeof(*mm_list) * cur_count, DRM_MEM_MM);
- if (!mm_list)
+ mm_list =
+ drm_alloc(sizeof(*mm_list) * cur_count, DRM_MEM_MM);
+ if (!mm_list)
return -ENOMEM;
}
-
+
mm_list_p = mm_list;
list_for_each_entry(entry, &ttm->p_mm_list, head) {
*mm_list_p++ = entry->mm;
- }
+ }
up(&ttm->dev->struct_sem);
mm_list_p = mm_list;
- for (i=0; i<shared_count; ++i, ++mm_list_p) {
+ for (i = 0; i < shared_count; ++i, ++mm_list_p) {
down_write(&((*mm_list_p)->mmap_sem));
}
-
+
down(&ttm->dev->struct_sem);
if (list_seq != ttm->mm_list_seq) {
mm_list_p = mm_list;
- for (i=0; i<shared_count; ++i, ++mm_list_p) {
+ for (i = 0; i < shared_count; ++i, ++mm_list_p) {
up_write(&((*mm_list_p)->mmap_sem));
}
- }
+ }
+ shared_count = atomic_read(&ttm->shared_count);
+
+ } while (list_seq != ttm->mm_list_seq);
- } while(list_seq != ttm->mm_list_seq);
-
- if (mm_list)
- drm_free(mm_list, sizeof(*mm_list)*cur_count, DRM_MEM_MM);
+ if (mm_list)
+ drm_free(mm_list, sizeof(*mm_list) * cur_count, DRM_MEM_MM);
ttm->mmap_sem_locked = TRUE;
return 0;
}
-
/*
* Change caching policy for range of pages in a ttm.
*/
@@ -611,11 +612,15 @@ static int remove_ttm_region(drm_ttm_backend_list_t * entry, int ret_if_busy)
if (mm_priv->fence_valid) {
if (ret_if_busy
&& !dev->mm_driver->test_fence(mm->dev, entry->fence_type,
- mm_priv->fence))
+ mm_priv->fence)) {
+ DRM_DEBUG("Fence not fulfilled\n");
return -EBUSY;
+ }
ret = drm_wait_buf_busy(entry);
- if (ret)
+ if (ret) {
+ DRM_DEBUG("Nope, buf busy.\n");
return ret;
+ }
}
entry->mm_node = NULL;
@@ -628,8 +633,6 @@ static int remove_ttm_region(drm_ttm_backend_list_t * entry, int ret_if_busy)
return 0;
}
-
-
/*
* Unbind a ttm region from the aperture and take it out of the
* aperture manager.
@@ -645,7 +648,7 @@ int drm_evict_ttm_region(drm_ttm_backend_list_t * entry)
switch (entry->state) {
case ttm_bound:
if (ttm && be->needs_cache_adjust(be)) {
- BUG_ON(entry->flags & DRM_MM_CACHED);
+ BUG_ON(entry->flags & DRM_MM_CACHED);
ret = drm_ttm_lock_mmap_sem(ttm);
if (ret)
return ret;
@@ -690,9 +693,11 @@ int drm_ttm_destroy_delayed(drm_ttm_mm_t * mm, int ret_if_busy)
list_for_each_safe(list, next, &mm->delayed) {
drm_ttm_backend_list_t *entry =
list_entry(list, drm_ttm_backend_list_t, head);
- if (!remove_ttm_region(entry, ret_if_busy))
+ DRM_DEBUG("Trying to remove put-on-hold from aperture\n");
+ if (remove_ttm_region(entry, ret_if_busy))
continue;
+ list_del_init(list);
ttm = entry->owner;
if (ttm) {
DRM_DEBUG("Destroying put-on-hold region\n");
@@ -731,7 +736,6 @@ void drm_destroy_ttm_region(drm_ttm_backend_list_t * entry)
list_add_tail(&entry->head, &entry->mm->delayed);
return;
}
-
drm_unbind_ttm_region(entry);
if (be) {
be->clear(entry->be);
@@ -744,7 +748,6 @@ void drm_destroy_ttm_region(drm_ttm_backend_list_t * entry)
}
be->destroy(be);
}
-
cur_page_flags = ttm->page_flags + entry->page_offset;
for (i = 0; i < entry->num_pages; ++i) {
DRM_MASK_VAL(*cur_page_flags, DRM_TTM_PAGE_USED, 0);
@@ -854,14 +857,14 @@ int drm_bind_ttm_region(drm_ttm_backend_list_t * region,
ttm = region->owner;
if (ttm && be->needs_cache_adjust(be)) {
- BUG_ON(region->flags & DRM_MM_CACHED);
+ BUG_ON(region->flags & DRM_MM_CACHED);
ret = drm_ttm_lock_mmap_sem(ttm);
if (ret)
return ret;
drm_set_caching(ttm, region->page_offset, region->num_pages,
DRM_TTM_PAGE_UNCACHED, TRUE);
} else {
- DRM_ERROR("Binding cached\n");
+ DRM_ERROR("Binding cached\n");
}
if ((ret = be->bind(be, aper_offset))) {
@@ -896,6 +899,41 @@ int drm_rebind_ttm_region(drm_ttm_backend_list_t * entry,
}
+void drm_fence_unfenced_region(drm_ttm_backend_list_t * entry)
+{
+ drm_mm_node_t *mm_node;
+ drm_ttm_mm_priv_t *mm_priv;
+ uint32_t fence;
+ drm_device_t *dev;
+
+ if (!entry)
+ return;
+
+ dev = entry->mm->dev;
+ mm_node = entry->mm_node;
+ if (!mm_node)
+ return;
+
+ mm_priv = (drm_ttm_mm_priv_t *) mm_node->private;
+ if (mm_priv->fence_valid)
+ return;
+
+ BUG_ON(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock));
+ DRM_DEBUG("Fencing unfenced region\n");
+ fence = dev->mm_driver->emit_fence(dev, entry->fence_type);
+ mm_priv->fence = fence;
+ mm_priv->fence_valid = TRUE;
+}
+
+void drm_ttm_fence_before_destroy(drm_ttm_t * ttm)
+{
+ drm_ttm_backend_list_t *entry;
+
+ list_for_each_entry(entry, &ttm->be_list->head, head) {
+ drm_fence_unfenced_region(entry);
+ }
+}
+
/*
* Destroy an anonymous ttm region.
*/
@@ -1085,7 +1123,7 @@ static void drm_ttm_fence_regions(drm_device_t * dev, drm_ttm_mm_t * mm)
fence_type = entry->region->fence_type;
if (!emitted[fence_type]) {
- BUG_ON(!_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ));
+ BUG_ON(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock));
fence = dev->mm_driver->emit_fence(dev, fence_type);
fence_seqs[fence_type] = fence;
emitted[fence_type] = TRUE;
@@ -1145,6 +1183,7 @@ static int drm_ttm_evict_lru_sl(drm_ttm_backend_list_t * entry)
break;
spin_unlock(mm_lock);
+ drm_ttm_destroy_delayed(entry->mm, TRUE);
up(&dev->struct_sem);
ret = drm_wait_buf_busy(evict_priv->region);
@@ -1192,9 +1231,9 @@ static int drm_validate_ttm_region(drm_ttm_backend_list_t * entry,
}
num_pages = (entry->owner) ? entry->num_pages : entry->anon_locked;
+ drm_ttm_destroy_delayed(entry->mm, TRUE);
spin_lock(mm_lock);
while (!mm_node) {
- drm_ttm_destroy_delayed(entry->mm, TRUE);
mm_node =
drm_mm_search_free_locked(&entry->mm->mm, num_pages, 0, 0);
if (!mm_node) {
@@ -1759,14 +1798,12 @@ int drm_mm_do_takedown(drm_device_t * dev)
{
drm_mm_driver_t *mm_driver;
-
if (!dev->mm_driver) {
DRM_ERROR("Memory manager not initialized.\n");
return -EINVAL;
}
mm_driver = dev->mm_driver;
- drm_mm_takedown(&mm_driver->vr_mm);
drm_ttm_mm_takedown(&mm_driver->ttm_mm);
drm_rmmap_locked(dev, mm_driver->mm_sarea_map->map);
dev->mm_driver = NULL;
@@ -1809,16 +1846,16 @@ int drm_mm_do_init(drm_device_t * dev, drm_mm_init_arg_t * arg)
tt_p_offset |= (arg->req.tt_p_offset_hi << shift);
}
- DRM_ERROR("Offset 0x%lx, Pages %ld\n",
+ DRM_DEBUG("Offset 0x%lx, Pages %ld\n",
tt_p_offset << PAGE_SHIFT, tt_p_size);
- mm_driver = dev->driver->init_mm(dev);
-
+ mm_driver = dev->driver->init_mm(dev);
+
if (!mm_driver) {
DRM_ERROR("Memory manager initialization failed.\n");
return -EINVAL;
}
-
+
down(&dev->struct_sem);
dev->mm_driver = mm_driver;
up(&dev->struct_sem);
@@ -1826,7 +1863,7 @@ int drm_mm_do_init(drm_device_t * dev, drm_mm_init_arg_t * arg)
drm_ttm_mm_init(dev, &dev->mm_driver->ttm_mm, tt_p_offset, tt_p_size);
drm_mm_init(&dev->mm_driver->vr_mm, vr_offset >> MM_VR_GRANULARITY,
vr_size >> MM_VR_GRANULARITY);
-
+
ret = drm_addmap_core(dev, 0, DRM_MM_SAREA_SIZE,
_DRM_SHM, _DRM_READ_ONLY, &mm_sarea);
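
One hunk above is worth restating: drm_ttm_lock_mmap_sem() has to take the mmap_sem of every process mapping the ttm, but it must not hold struct_sem while blocking on them. It therefore snapshots the mm list under struct_sem, drops struct_sem, acquires the mmap_sems, retakes struct_sem and compares a sequence counter; if the list changed in between it releases the mmap_sems and retries. The same drop-snapshot-reacquire-verify loop in plain pthreads terms (all names here are hypothetical; this sketches the idea, not the kernel code):

#include <pthread.h>

#define MAX_SHARERS 16	/* sketch assumes at most this many mappers */

/* Hypothetical shared object: a list of per-process locks guarded by a
 * "struct" lock, plus a sequence counter bumped on every list change. */
struct shared {
	pthread_mutex_t struct_lock;
	unsigned int list_seq;
	int count;
	pthread_mutex_t *sharer_lock[MAX_SHARERS];
};

/* Acquire every sharer lock without holding struct_lock across the
 * (possibly blocking) acquisitions.  Returns with struct_lock and all
 * sharer locks held; the caller releases them when it is finished. */
static void lock_all_sharers(struct shared *s)
{
	pthread_mutex_t *snap[MAX_SHARERS];
	unsigned int seq;
	int n, i;

	pthread_mutex_lock(&s->struct_lock);
	for (;;) {
		seq = s->list_seq;			/* remember the list version */
		n = s->count;
		for (i = 0; i < n; ++i)
			snap[i] = s->sharer_lock[i];	/* snapshot under struct_lock */

		pthread_mutex_unlock(&s->struct_lock);	/* drop before blocking */
		for (i = 0; i < n; ++i)
			pthread_mutex_lock(snap[i]);

		pthread_mutex_lock(&s->struct_lock);
		if (seq == s->list_seq)			/* list unchanged: done */
			return;

		for (i = 0; i < n; ++i)			/* stale snapshot: back off */
			pthread_mutex_unlock(snap[i]);
	}
}

The caller later drops the sharer locks and struct_lock again once it has finished touching the shared state, which is what ttm->mmap_sem_locked tracks in the real function.
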
diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h
index b87b8610..8a82917c 100644
--- a/linux-core/drm_ttm.h
+++ b/linux-core/drm_ttm.h
@@ -130,7 +130,8 @@ extern int drm_destroy_ttm(drm_ttm_t * ttm);
extern void drm_user_destroy_region(drm_ttm_backend_list_t * entry);
extern int drm_ttm_add_mm_to_list(drm_ttm_t *ttm, struct mm_struct *mm);
extern void drm_ttm_delete_mm(drm_ttm_t *ttm, struct mm_struct *mm);
-
+extern void drm_ttm_fence_before_destroy(drm_ttm_t *ttm);
+extern void drm_fence_unfenced_region(drm_ttm_backend_list_t *entry);
extern int drm_ttm_ioctl(DRM_IOCTL_ARGS);
extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS);
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index 6a329ee7..ae187c93 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -762,14 +762,6 @@ int i915_driver_load(drm_device_t *dev, unsigned long flags)
void i915_driver_lastclose(drm_device_t * dev)
{
-#ifdef TTM_INIT_HACK
- /*
- * FIXME: Temporary initialization hack.
- */
-
-
- drm_mm_do_takedown (dev);
-#endif
if (dev->dev_private) {
drm_i915_private_t *dev_priv = dev->dev_private;
i915_mem_takedown(&(dev_priv->agp_heap));
diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c
index 759bc12d..f31a4e53 100644
--- a/shared-core/i915_irq.c
+++ b/shared-core/i915_irq.c
@@ -257,6 +257,10 @@ int i915_test_fence(drm_device_t *dev, uint32_t type, uint32_t fence)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int tmp = i915_do_test_fence(dev, fence);
+ if (!dev_priv) {
+ DRM_ERROR("called without initialization\n");
+ return TRUE;
+ }
fence = READ_BREADCRUMB(dev_priv);
dev->mm_driver->mm_sarea->retired[0] = fence;