Diffstat (limited to 'linux/drm_dma.h')
-rw-r--r-- | linux/drm_dma.h | 341
1 file changed, 9 insertions, 332 deletions
diff --git a/linux/drm_dma.h b/linux/drm_dma.h
index 4ea6b07d9..640e245da 100644
--- a/linux/drm_dma.h
+++ b/linux/drm_dma.h
@@ -127,61 +127,6 @@ void DRM(dma_takedown)(drm_device_t *dev)
 }
 
-#if __HAVE_DMA_HISTOGRAM
-/* This is slow, but is useful for debugging. */
-int DRM(histogram_slot)(unsigned long count)
-{
-	int value = DRM_DMA_HISTOGRAM_INITIAL;
-	int slot;
-
-	for (slot = 0;
-	     slot < DRM_DMA_HISTOGRAM_SLOTS;
-	     ++slot, value = DRM_DMA_HISTOGRAM_NEXT(value)) {
-		if (count < value) return slot;
-	}
-	return DRM_DMA_HISTOGRAM_SLOTS - 1;
-}
-
-void DRM(histogram_compute)(drm_device_t *dev, drm_buf_t *buf)
-{
-	cycles_t queued_to_dispatched;
-	cycles_t dispatched_to_completed;
-	cycles_t completed_to_freed;
-	int q2d, d2c, c2f, q2c, q2f;
-
-	if (buf->time_queued) {
-		queued_to_dispatched = (buf->time_dispatched
-					- buf->time_queued);
-		dispatched_to_completed = (buf->time_completed
-					   - buf->time_dispatched);
-		completed_to_freed = (buf->time_freed
-				      - buf->time_completed);
-
-		q2d = DRM(histogram_slot)(queued_to_dispatched);
-		d2c = DRM(histogram_slot)(dispatched_to_completed);
-		c2f = DRM(histogram_slot)(completed_to_freed);
-
-		q2c = DRM(histogram_slot)(queued_to_dispatched
-					  + dispatched_to_completed);
-		q2f = DRM(histogram_slot)(queued_to_dispatched
-					  + dispatched_to_completed
-					  + completed_to_freed);
-
-		atomic_inc(&dev->histo.total);
-		atomic_inc(&dev->histo.queued_to_dispatched[q2d]);
-		atomic_inc(&dev->histo.dispatched_to_completed[d2c]);
-		atomic_inc(&dev->histo.completed_to_freed[c2f]);
-
-		atomic_inc(&dev->histo.queued_to_completed[q2c]);
-		atomic_inc(&dev->histo.queued_to_freed[q2f]);
-
-	}
-	buf->time_queued = 0;
-	buf->time_dispatched = 0;
-	buf->time_completed = 0;
-	buf->time_freed = 0;
-}
-#endif
 
 void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf)
 {
@@ -191,9 +136,6 @@ void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf)
 	buf->pending = 0;
 	buf->filp = 0;
 	buf->used = 0;
-#if __HAVE_DMA_HISTOGRAM
-	buf->time_completed = get_cycles();
-#endif
 
 	if ( __HAVE_DMA_WAITQUEUE && waitqueue_active(&buf->dma_wait)) {
 		wake_up_interruptible(&buf->dma_wait);
@@ -238,276 +180,6 @@ void DRM(reclaim_buffers)( struct file *filp )
 #endif
 
-/* GH: This is a big hack for now...
- */
-#if __HAVE_OLD_DMA
-
-void DRM(clear_next_buffer)(drm_device_t *dev)
-{
-	drm_device_dma_t *dma = dev->dma;
-
-	dma->next_buffer = NULL;
-	if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
-		wake_up_interruptible(&dma->next_queue->flush_queue);
-	}
-	dma->next_queue = NULL;
-}
-
-int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long))
-{
-	int i;
-	int candidate = -1;
-	int j = jiffies;
-
-	if (!dev) {
-		DRM_ERROR("No device\n");
-		return -1;
-	}
-	if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
-		/* This only happens between the time the
-		   interrupt is initialized and the time
-		   the queues are initialized. */
-		return -1;
-	}
-
-	/* Doing "while locked" DMA? */
-	if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
-		return DRM_KERNEL_CONTEXT;
-	}
-
-	/* If there are buffers on the last_context
-	   queue, and we have not been executing
-	   this context very long, continue to
-	   execute this context. */
-	if (dev->last_switch <= j
-	    && dev->last_switch + DRM_TIME_SLICE > j
-	    && DRM_WAITCOUNT(dev, dev->last_context)) {
-		return dev->last_context;
-	}
-
-	/* Otherwise, find a candidate */
-	for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
-		if (DRM_WAITCOUNT(dev, i)) {
-			candidate = dev->last_checked = i;
-			break;
-		}
-	}
-
-	if (candidate < 0) {
-		for (i = 0; i < dev->queue_count; i++) {
-			if (DRM_WAITCOUNT(dev, i)) {
-				candidate = dev->last_checked = i;
-				break;
-			}
-		}
-	}
-
-	if (wrapper
-	    && candidate >= 0
-	    && candidate != dev->last_context
-	    && dev->last_switch <= j
-	    && dev->last_switch + DRM_TIME_SLICE > j) {
-		if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) {
-			del_timer(&dev->timer);
-			dev->timer.function = wrapper;
-			dev->timer.data = (unsigned long)dev;
-			dev->timer.expires = dev->last_switch+DRM_TIME_SLICE;
-			add_timer(&dev->timer);
-		}
-		return -1;
-	}
-
-	return candidate;
-}
-
-
-int DRM(dma_enqueue)(struct file *filp, drm_dma_t *d)
-{
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->dev;
-	int i;
-	drm_queue_t *q;
-	drm_buf_t *buf;
-	int idx;
-	int while_locked = 0;
-	drm_device_dma_t *dma = dev->dma;
-	DECLARE_WAITQUEUE(entry, current);
-
-	DRM_DEBUG("%d\n", d->send_count);
-
-	if (d->flags & _DRM_DMA_WHILE_LOCKED) {
-		int context = dev->lock.hw_lock->lock;
-
-		if (!_DRM_LOCK_IS_HELD(context)) {
-			DRM_ERROR("No lock held during \"while locked\""
-				  " request\n");
-			return -EINVAL;
-		}
-		if (d->context != _DRM_LOCKING_CONTEXT(context)
-		    && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
-			DRM_ERROR("Lock held by %d while %d makes"
-				  " \"while locked\" request\n",
-				  _DRM_LOCKING_CONTEXT(context),
-				  d->context);
-			return -EINVAL;
-		}
-		q = dev->queuelist[DRM_KERNEL_CONTEXT];
-		while_locked = 1;
-	} else {
-		q = dev->queuelist[d->context];
-	}
-
-
-	atomic_inc(&q->use_count);
-	if (atomic_read(&q->block_write)) {
-		add_wait_queue(&q->write_queue, &entry);
-		atomic_inc(&q->block_count);
-		for (;;) {
-			current->state = TASK_INTERRUPTIBLE;
-			if (!atomic_read(&q->block_write)) break;
-			schedule();
-			if (signal_pending(current)) {
-				atomic_dec(&q->use_count);
-				remove_wait_queue(&q->write_queue, &entry);
-				return -EINTR;
-			}
-		}
-		atomic_dec(&q->block_count);
-		current->state = TASK_RUNNING;
-		remove_wait_queue(&q->write_queue, &entry);
-	}
-
-	for (i = 0; i < d->send_count; i++) {
-		idx = d->send_indices[i];
-		if (idx < 0 || idx >= dma->buf_count) {
-			atomic_dec(&q->use_count);
-			DRM_ERROR("Index %d (of %d max)\n",
-				  d->send_indices[i], dma->buf_count - 1);
-			return -EINVAL;
-		}
-		buf = dma->buflist[ idx ];
-		if (buf->filp != filp) {
-			atomic_dec(&q->use_count);
-			DRM_ERROR("Process %d using buffer not owned\n",
-				  current->pid);
-			return -EINVAL;
-		}
-		if (buf->list != DRM_LIST_NONE) {
-			atomic_dec(&q->use_count);
-			DRM_ERROR("Process %d using buffer %d on list %d\n",
-				  current->pid, buf->idx, buf->list);
-		}
-		buf->used = d->send_sizes[i];
-		buf->while_locked = while_locked;
-		buf->context = d->context;
-		if (!buf->used) {
-			DRM_ERROR("Queueing 0 length buffer\n");
-		}
-		if (buf->pending) {
-			atomic_dec(&q->use_count);
-			DRM_ERROR("Queueing pending buffer:"
-				  " buffer %d, offset %d\n",
-				  d->send_indices[i], i);
-			return -EINVAL;
-		}
-		if (buf->waiting) {
-			atomic_dec(&q->use_count);
-			DRM_ERROR("Queueing waiting buffer:"
-				  " buffer %d, offset %d\n",
-				  d->send_indices[i], i);
-			return -EINVAL;
-		}
-		buf->waiting = 1;
-		if (atomic_read(&q->use_count) == 1
-		    || atomic_read(&q->finalization)) {
-			DRM(free_buffer)(dev, buf);
-		} else {
-			DRM(waitlist_put)(&q->waitlist, buf);
-			atomic_inc(&q->total_queued);
-		}
-	}
-	atomic_dec(&q->use_count);
-
-	return 0;
-}
-
-static int DRM(dma_get_buffers_of_order)(struct file *filp, drm_dma_t *d,
-					  int order)
-{
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->dev;
-	int i;
-	drm_buf_t *buf;
-	drm_device_dma_t *dma = dev->dma;
-
-	for (i = d->granted_count; i < d->request_count; i++) {
-		buf = DRM(freelist_get)(&dma->bufs[order].freelist,
-					d->flags & _DRM_DMA_WAIT);
-		if (!buf) break;
-		if (buf->pending || buf->waiting) {
-			DRM_ERROR("Free buffer %d in use: filp %p (w%d, p%d)\n",
-				  buf->idx,
-				  buf->filp,
-				  buf->waiting,
-				  buf->pending);
-		}
-		buf->filp = filp;
-		if (copy_to_user(&d->request_indices[i],
-				 &buf->idx,
-				 sizeof(buf->idx)))
-			return -EFAULT;
-
-		if (copy_to_user(&d->request_sizes[i],
-				 &buf->total,
-				 sizeof(buf->total)))
-			return -EFAULT;
-
-		++d->granted_count;
-	}
-	return 0;
-}
-
-
-int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma)
-{
-	int order;
-	int retcode = 0;
-	int tmp_order;
-
-	order = DRM(order)(dma->request_size);
-
-	dma->granted_count = 0;
-	retcode = DRM(dma_get_buffers_of_order)(filp, dma, order);
-
-	if (dma->granted_count < dma->request_count
-	    && (dma->flags & _DRM_DMA_SMALLER_OK)) {
-		for (tmp_order = order - 1;
-		     !retcode
-			     && dma->granted_count < dma->request_count
-			     && tmp_order >= DRM_MIN_ORDER;
-		     --tmp_order) {
-
-			retcode = DRM(dma_get_buffers_of_order)(filp, dma,
-								tmp_order);
-		}
-	}
-
-	if (dma->granted_count < dma->request_count
-	    && (dma->flags & _DRM_DMA_LARGER_OK)) {
-		for (tmp_order = order + 1;
-		     !retcode
-			     && dma->granted_count < dma->request_count
-			     && tmp_order <= DRM_MAX_ORDER;
-		     ++tmp_order) {
-
-			retcode = DRM(dma_get_buffers_of_order)(filp, dma,
-								tmp_order);
-		}
-	}
-	return 0;
-}
-
-#endif /* __HAVE_OLD_DMA */
 
 #if __HAVE_DMA_IRQ
 
@@ -538,10 +210,14 @@ int DRM(irq_install)( drm_device_t *dev, int irq )
 	dev->dma->this_buffer = NULL;
 
 #if __HAVE_DMA_IRQ_BH
+#if !HAS_WORKQUEUE
 	INIT_LIST_HEAD( &dev->tq.list );
 	dev->tq.sync = 0;
 	dev->tq.routine = DRM(dma_immediate_bh);
 	dev->tq.data = dev;
+#else
+	INIT_WORK(&dev->work, DRM(dma_immediate_bh), dev);
+#endif
 #endif
 
 #if __HAVE_VBL_IRQ
@@ -655,7 +331,7 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
 		 * for the same vblank sequence number; nothing to be done in
 		 * that case
 		 */
-		list_for_each( ( (struct list_head *) vbl_sig ), &dev->vbl_sigs.head ) {
+		list_for_each_entry( vbl_sig, &dev->vbl_sigs.head, head ) {
 			if (vbl_sig->sequence == vblwait.request.sequence
 			    && vbl_sig->info.si_signo == vblwait.request.signal
 			    && vbl_sig->task == current)
@@ -706,19 +382,20 @@ done:
 void DRM(vbl_send_signals)( drm_device_t *dev )
 {
-	struct list_head *tmp;
+	struct list_head *list, *tmp;
 	drm_vbl_sig_t *vbl_sig;
 	unsigned int vbl_seq = atomic_read( &dev->vbl_received );
 	unsigned long flags;
 
 	spin_lock_irqsave( &dev->vbl_lock, flags );
 
-	list_for_each_safe( ( (struct list_head *) vbl_sig ), tmp, &dev->vbl_sigs.head ) {
+	list_for_each_safe( list, tmp, &dev->vbl_sigs.head ) {
+		vbl_sig = list_entry( list, drm_vbl_sig_t, head );
 		if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) {
 			vbl_sig->info.si_code = vbl_seq;
 			send_sig_info( vbl_sig->info.si_signo, &vbl_sig->info, vbl_sig->task );
 
-			list_del( (struct list_head *) vbl_sig );
+			list_del( list );
 
 			DRM_FREE( vbl_sig, sizeof(*vbl_sig) );
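
Editor's note (not part of the patch): the list-handling hunks above replace casts of drm_vbl_sig_t pointers to struct list_head with the standard list_for_each_entry()/list_entry() idiom from the kernel's list.h. The cast only happens to work when the list_head is the first member of the structure; list_entry() recovers the containing structure at any member offset. The sketch below mimics that idiom in self-contained userspace C with minimal stand-in list helpers; struct vbl_sig and every name in it are hypothetical illustrations, not the DRM or kernel definitions.

/* Illustrative sketch only: a minimal intrusive list mimicking <linux/list.h>. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

/* list_entry: recover the enclosing struct from its embedded list_head,
 * regardless of where the list_head sits inside the struct. */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vbl_sig {
	int sequence;          /* payload field placed first on purpose ...    */
	struct list_head head; /* ... so casting vbl_sig* to list_head* breaks */
};

int main(void)
{
	struct list_head sigs;
	struct list_head *pos, *tmp;
	int i;

	list_init(&sigs);
	for (i = 0; i < 3; i++) {
		struct vbl_sig *s = malloc(sizeof(*s));
		s->sequence = i;
		list_add_tail(&s->head, &sigs);
	}

	/* Deletion-safe walk, as in DRM(vbl_send_signals): iterate with plain
	 * list_head cursors and use list_entry() to reach the container. */
	for (pos = sigs.next, tmp = pos->next; pos != &sigs;
	     pos = tmp, tmp = pos->next) {
		struct vbl_sig *s = list_entry(pos, struct vbl_sig, head);
		printf("sequence %d\n", s->sequence);
		list_del(pos);
		free(s);
	}
	return 0;
}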