author    Ian Romanick <idr@us.ibm.com>    2003-01-26 07:43:50 +0000
committer Ian Romanick <idr@us.ibm.com>    2003-01-26 07:43:50 +0000
commit    3163f74b8eddf97bc9a71f01dfa02d2d33091b62 (patch)
tree      cf0741bf2f3dabe273f2dfaf698891ea19a4c2bc
parent    a790687c9a4b369094a2a56e29a8240db573a54a (diff)
-rw-r--r--  bsd-core/drmP.h              |  11
-rw-r--r--  bsd-core/drm_dma.c           |  65
-rw-r--r--  bsd/drm.h                    |  20
-rw-r--r--  bsd/drmP.h                   |  11
-rw-r--r--  bsd/drm_dma.h                |  65
-rw-r--r--  linux-core/drmP.h            |   2
-rw-r--r--  linux-core/drm_agpsupport.c  |  67
-rw-r--r--  linux-core/drm_dma.c         |  54
-rw-r--r--  linux-core/drm_proc.c        |   5
-rw-r--r--  linux-core/i810_dma.c        |   4
-rw-r--r--  linux/drm.h                  |   2
-rw-r--r--  linux/drmP.h                 |   2
-rw-r--r--  linux/drm_agpsupport.h       |  67
-rw-r--r--  linux/drm_dma.h              |  54
-rw-r--r--  linux/drm_proc.h             |   5
-rw-r--r--  linux/i810_dma.c             |   4
-rw-r--r--  linux/sis_ds.c               |  22
-rw-r--r--  shared-core/drm.h            |   2
-rw-r--r--  shared-core/radeon_cp.c      |  39
-rw-r--r--  shared-core/radeon_drv.h     |   1
-rw-r--r--  shared-core/radeon_mem.c     |  10
-rw-r--r--  shared-core/radeon_state.c   | 201
-rw-r--r--  shared/drm.h                 |   2
-rw-r--r--  shared/radeon.h              |  34
-rw-r--r--  shared/radeon_cp.c           |  39
-rw-r--r--  shared/radeon_drv.h          |   1
-rw-r--r--  shared/radeon_mem.c          |  10
-rw-r--r--  shared/radeon_state.c        | 201
28 files changed, 565 insertions(+), 435 deletions(-)
diff --git a/bsd-core/drmP.h b/bsd-core/drmP.h
index b7b21da4e..541bc2ebe 100644
--- a/bsd-core/drmP.h
+++ b/bsd-core/drmP.h
@@ -392,6 +392,14 @@ typedef struct drm_map_list_entry {
drm_map_t *map;
} drm_map_list_entry_t;
+TAILQ_HEAD(drm_vbl_sig_list, drm_vbl_sig);
+typedef struct drm_vbl_sig {
+ TAILQ_ENTRY(drm_vbl_sig) link;
+ unsigned int sequence;
+ int signo;
+ int pid;
+} drm_vbl_sig_t;
+
struct drm_device {
#ifdef __NetBSD__
struct device device; /* NetBSD's softc is an extension of struct device */
@@ -469,6 +477,8 @@ struct drm_device {
#if __HAVE_VBL_IRQ
wait_queue_head_t vbl_queue; /* vbl wait channel */
atomic_t vbl_received;
+ struct drm_vbl_sig_list vbl_sig_list;
+ DRM_SPINTYPE vbl_lock;
#endif
cycles_t ctx_start;
cycles_t lck_start;
@@ -613,6 +623,7 @@ extern drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block);
#endif /* __HAVE_DMA */
#if __HAVE_VBL_IRQ
extern int DRM(vblank_wait)(drm_device_t *dev, unsigned int *vbl_seq);
+extern void DRM(vbl_send_signals)( drm_device_t *dev );
#endif
#if __REALLY_HAVE_AGP
diff --git a/bsd-core/drm_dma.c b/bsd-core/drm_dma.c
index 5632b5a2b..69c66c9a9 100644
--- a/bsd-core/drm_dma.c
+++ b/bsd-core/drm_dma.c
@@ -524,6 +524,11 @@ int DRM(irq_install)( drm_device_t *dev, int irq )
TASK_INIT(&dev->task, 0, DRM(dma_immediate_bh), dev);
#endif
+#if __HAVE_VBL_IRQ
+ DRM_SPININIT( dev->vbl_lock, "vblsig" );
+ TAILQ_INIT( &dev->vbl_sig_list );
+#endif
+
/* Before installing handler */
DRM(driver_irq_preinstall)( dev );
@@ -612,21 +617,67 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data,
sizeof(vblwait) );
- if ( vblwait.type == _DRM_VBLANK_RELATIVE ) {
- vblwait.sequence += atomic_read( &dev->vbl_received );
+ if (vblwait.request.type & _DRM_VBLANK_RELATIVE) {
+ vblwait.request.sequence += atomic_read(&dev->vbl_received);
}
- ret = DRM(vblank_wait)( dev, &vblwait.sequence );
-
- microtime( &now );
- vblwait.tval_sec = now.tv_sec;
- vblwait.tval_usec = now.tv_usec;
+ flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
+ if (flags & _DRM_VBLANK_SIGNAL) {
+ drm_vbl_sig_t *vbl_sig = DRM_MALLOC(sizeof(drm_vbl_sig_t));
+ if (vbl_sig == NULL)
+ return ENOMEM;
+ bzero(vbl_sig, sizeof(*vbl_sig));
+
+ vbl_sig->sequence = vblwait.request.sequence;
+ vbl_sig->signo = vblwait.request.signal;
+ vbl_sig->pid = DRM_CURRENTPID;
+
+ vblwait.reply.sequence = atomic_read(&dev->vbl_received);
+
+ DRM_SPINLOCK(&dev->vbl_lock);
+ TAILQ_INSERT_HEAD(&dev->vbl_sig_list, vbl_sig, link);
+ DRM_SPINUNLOCK(&dev->vbl_lock);
+ ret = 0;
+ } else {
+ ret = DRM(vblank_wait)(dev, &vblwait.request.sequence);
+
+ microtime(&now);
+ vblwait.reply.tval_sec = now.tv_sec;
+ vblwait.reply.tval_usec = now.tv_usec;
+ }
DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait,
sizeof(vblwait) );
return ret;
}
+
+void DRM(vbl_send_signals)( drm_device_t *dev )
+{
+ drm_vbl_sig_t *vbl_sig;
+ unsigned int vbl_seq = atomic_read( &dev->vbl_received );
+ struct proc *p;
+
+ DRM_SPINLOCK(&dev->vbl_lock);
+
+ vbl_sig = TAILQ_FIRST(&dev->vbl_sig_list);
+ while (vbl_sig != NULL) {
+ drm_vbl_sig_t *next = TAILQ_NEXT(vbl_sig, link);
+
+ if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) {
+ p = pfind(vbl_sig->pid);
+ if (p != NULL)
+ psignal(p, vbl_sig->signo);
+
+ TAILQ_REMOVE(&dev->vbl_sig_list, vbl_sig, link);
+ DRM_FREE(vbl_sig);
+ }
+ vbl_sig = next;
+ }
+
+ DRM_SPINUNLOCK(&dev->vbl_lock);
+}
+
#endif /* __HAVE_VBL_IRQ */
#else
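
The delivery test in DRM(vbl_send_signals) above, ( vbl_seq - vbl_sig->sequence ) <= (1<<23), depends on unsigned 32-bit arithmetic: the subtraction wraps modulo 2^32, so a target sequence that has been reached still tests true after the counter wraps past zero, while a still-pending one yields a huge difference. A minimal standalone sketch of that comparison (hypothetical values, not code from this tree):

#include <stdio.h>

/* Wraparound-safe "has the vblank counter reached the target?" test,
 * mirroring the (1<<23) window used in DRM(vbl_send_signals). */
static int vbl_passed(unsigned int current, unsigned int target)
{
	return (current - target) <= (1u << 23);
}

int main(void)
{
	printf("%d\n", vbl_passed(100u, 90u));        /* 1: target passed */
	printf("%d\n", vbl_passed(90u, 100u));        /* 0: still pending */
	printf("%d\n", vbl_passed(5u, 0xfffffff0u));  /* 1: passed across wrap */
	return 0;
}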
diff --git a/bsd/drm.h b/bsd/drm.h
index f45f088ef..d1d669437 100644
--- a/bsd/drm.h
+++ b/bsd/drm.h
@@ -346,15 +346,29 @@ typedef struct drm_irq_busid {
} drm_irq_busid_t;
typedef enum {
- _DRM_VBLANK_ABSOLUTE = 0x0, /* Wait for specific vblank sequence number */
- _DRM_VBLANK_RELATIVE = 0x1 /* Wait for given number of vblanks */
+ _DRM_VBLANK_ABSOLUTE = 0x0, /* Wait for specific vblank sequence number */
+ _DRM_VBLANK_RELATIVE = 0x1, /* Wait for given number of vblanks */
+ _DRM_VBLANK_SIGNAL = 0x40000000 /* Send signal instead of blocking */
} drm_vblank_seq_type_t;
-typedef struct drm_radeon_vbl_wait {
+#define _DRM_VBLANK_FLAGS_MASK _DRM_VBLANK_SIGNAL
+
+struct drm_wait_vblank_request {
+ drm_vblank_seq_type_t type;
+ unsigned int sequence;
+ unsigned long signal;
+};
+
+struct drm_wait_vblank_reply {
drm_vblank_seq_type_t type;
unsigned int sequence;
long tval_sec;
long tval_usec;
+};
+
+typedef union drm_wait_vblank {
+ struct drm_wait_vblank_request request;
+ struct drm_wait_vblank_reply reply;
} drm_wait_vblank_t;
typedef struct drm_agp_mode {
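
The request/reply union above extends the wait-vblank ioctl without growing its size: the caller fills in drm_wait_vblank_request, and the driver overwrites the same storage with drm_wait_vblank_reply on the way out. A hedged sketch of how a client might queue a vblank signal with the new flag (assumes this tree's drm.h, an already-open DRM file descriptor, and the era's DRM_IOCTL_WAIT_VBLANK request code; illustrative only):

#include <stdio.h>
#include <signal.h>
#include <sys/ioctl.h>
#include "drm.h"

void queue_vbl_signal(int fd)
{
	drm_wait_vblank_t vbl;

	/* Ask for SIGUSR1 one vblank from now instead of blocking. */
	vbl.request.type = (drm_vblank_seq_type_t)
		(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_SIGNAL);
	vbl.request.sequence = 1;
	vbl.request.signal = SIGUSR1;

	if (ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl) == 0)
		/* On success the driver filled the reply member. */
		printf("signal queued at vblank %u\n", vbl.reply.sequence);
}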
diff --git a/bsd/drmP.h b/bsd/drmP.h
index b7b21da4e..541bc2ebe 100644
--- a/bsd/drmP.h
+++ b/bsd/drmP.h
@@ -392,6 +392,14 @@ typedef struct drm_map_list_entry {
drm_map_t *map;
} drm_map_list_entry_t;
+TAILQ_HEAD(drm_vbl_sig_list, drm_vbl_sig);
+typedef struct drm_vbl_sig {
+ TAILQ_ENTRY(drm_vbl_sig) link;
+ unsigned int sequence;
+ int signo;
+ int pid;
+} drm_vbl_sig_t;
+
struct drm_device {
#ifdef __NetBSD__
struct device device; /* NetBSD's softc is an extension of struct device */
@@ -469,6 +477,8 @@ struct drm_device {
#if __HAVE_VBL_IRQ
wait_queue_head_t vbl_queue; /* vbl wait channel */
atomic_t vbl_received;
+ struct drm_vbl_sig_list vbl_sig_list;
+ DRM_SPINTYPE vbl_lock;
#endif
cycles_t ctx_start;
cycles_t lck_start;
@@ -613,6 +623,7 @@ extern drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block);
#endif /* __HAVE_DMA */
#if __HAVE_VBL_IRQ
extern int DRM(vblank_wait)(drm_device_t *dev, unsigned int *vbl_seq);
+extern void DRM(vbl_send_signals)( drm_device_t *dev );
#endif
#if __REALLY_HAVE_AGP
diff --git a/bsd/drm_dma.h b/bsd/drm_dma.h
index 5632b5a2b..69c66c9a9 100644
--- a/bsd/drm_dma.h
+++ b/bsd/drm_dma.h
@@ -524,6 +524,11 @@ int DRM(irq_install)( drm_device_t *dev, int irq )
TASK_INIT(&dev->task, 0, DRM(dma_immediate_bh), dev);
#endif
+#if __HAVE_VBL_IRQ
+ DRM_SPININIT( dev->vbl_lock, "vblsig" );
+ TAILQ_INIT( &dev->vbl_sig_list );
+#endif
+
/* Before installing handler */
DRM(driver_irq_preinstall)( dev );
@@ -612,21 +617,67 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data,
sizeof(vblwait) );
- if ( vblwait.type == _DRM_VBLANK_RELATIVE ) {
- vblwait.sequence += atomic_read( &dev->vbl_received );
+ if (vblwait.request.type & _DRM_VBLANK_RELATIVE) {
+ vblwait.request.sequence += atomic_read(&dev->vbl_received);
}
- ret = DRM(vblank_wait)( dev, &vblwait.sequence );
-
- microtime( &now );
- vblwait.tval_sec = now.tv_sec;
- vblwait.tval_usec = now.tv_usec;
+ flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
+ if (flags & _DRM_VBLANK_SIGNAL) {
+ drm_vbl_sig_t *vbl_sig = DRM_MALLOC(sizeof(drm_vbl_sig_t));
+ if (vbl_sig == NULL)
+ return ENOMEM;
+ bzero(vbl_sig, sizeof(*vbl_sig));
+
+ vbl_sig->sequence = vblwait.request.sequence;
+ vbl_sig->signo = vblwait.request.signal;
+ vbl_sig->pid = DRM_CURRENTPID;
+
+ vblwait.reply.sequence = atomic_read(&dev->vbl_received);
+
+ DRM_SPINLOCK(&dev->vbl_lock);
+ TAILQ_INSERT_HEAD(&dev->vbl_sig_list, vbl_sig, link);
+ DRM_SPINUNLOCK(&dev->vbl_lock);
+ ret = 0;
+ } else {
+ ret = DRM(vblank_wait)(dev, &vblwait.request.sequence);
+
+ microtime(&now);
+ vblwait.reply.tval_sec = now.tv_sec;
+ vblwait.reply.tval_usec = now.tv_usec;
+ }
DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait,
sizeof(vblwait) );
return ret;
}
+
+void DRM(vbl_send_signals)( drm_device_t *dev )
+{
+ drm_vbl_sig_t *vbl_sig;
+ unsigned int vbl_seq = atomic_read( &dev->vbl_received );
+ struct proc *p;
+
+ DRM_SPINLOCK(&dev->vbl_lock);
+
+ vbl_sig = TAILQ_FIRST(&dev->vbl_sig_list);
+ while (vbl_sig != NULL) {
+ drm_vbl_sig_t *next = TAILQ_NEXT(vbl_sig, link);
+
+ if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) {
+ p = pfind(vbl_sig->pid);
+ if (p != NULL)
+ psignal(p, vbl_sig->signo);
+
+ TAILQ_REMOVE(&dev->vbl_sig_list, vbl_sig, link);
+ DRM_FREE(vbl_sig);
+ }
+ vbl_sig = next;
+ }
+
+ DRM_SPINUNLOCK(&dev->vbl_lock);
+}
+
#endif /* __HAVE_VBL_IRQ */
#else
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 3f51b9b00..010450370 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -488,7 +488,6 @@ typedef struct drm_agp_mem {
typedef struct drm_agp_head {
agp_kern_info agp_info;
- const char *chipset;
drm_agp_mem_t *memory;
unsigned long mode;
int enabled;
@@ -593,6 +592,7 @@ typedef struct drm_device {
atomic_t vbl_received;
spinlock_t vbl_lock;
drm_vbl_sig_t vbl_sigs;
+ unsigned int vbl_pending;
#endif
cycles_t ctx_start;
cycles_t lck_start;
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index cd46110c4..6d6b5911f 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -260,60 +260,6 @@ drm_agp_head_t *DRM(agp_init)(void)
return NULL;
}
head->memory = NULL;
- switch (head->agp_info.chipset) {
- case INTEL_GENERIC: head->chipset = "Intel"; break;
- case INTEL_LX: head->chipset = "Intel 440LX"; break;
- case INTEL_BX: head->chipset = "Intel 440BX"; break;
- case INTEL_GX: head->chipset = "Intel 440GX"; break;
- case INTEL_I810: head->chipset = "Intel i810"; break;
-
- case INTEL_I815: head->chipset = "Intel i815"; break;
-#if LINUX_VERSION_CODE >= 0x02040f /* KERNEL_VERSION(2,4,15) */
- case INTEL_I820: head->chipset = "Intel i820"; break;
-#endif
- case INTEL_I840: head->chipset = "Intel i840"; break;
-#if LINUX_VERSION_CODE >= 0x02040f /* KERNEL_VERSION(2,4,15) */
- case INTEL_I845: head->chipset = "Intel i845"; break;
-#endif
- case INTEL_I850: head->chipset = "Intel i850"; break;
-
- case VIA_GENERIC: head->chipset = "VIA"; break;
- case VIA_VP3: head->chipset = "VIA VP3"; break;
- case VIA_MVP3: head->chipset = "VIA MVP3"; break;
- case VIA_MVP4: head->chipset = "VIA MVP4"; break;
- case VIA_APOLLO_KX133: head->chipset = "VIA Apollo KX133";
- break;
- case VIA_APOLLO_KT133: head->chipset = "VIA Apollo KT133";
- break;
-
- case VIA_APOLLO_PRO: head->chipset = "VIA Apollo Pro";
- break;
- case SIS_GENERIC: head->chipset = "SiS"; break;
- case AMD_GENERIC: head->chipset = "AMD"; break;
- case AMD_IRONGATE: head->chipset = "AMD Irongate"; break;
- case ALI_GENERIC: head->chipset = "ALi"; break;
- case ALI_M1541: head->chipset = "ALi M1541"; break;
-
-#if LINUX_VERSION_CODE >= 0x020402
- case ALI_M1621: head->chipset = "ALi M1621"; break;
- case ALI_M1631: head->chipset = "ALi M1631"; break;
- case ALI_M1632: head->chipset = "ALi M1632"; break;
- case ALI_M1641: head->chipset = "ALi M1641"; break;
- case ALI_M1647: head->chipset = "ALi M1647"; break;
- case ALI_M1651: head->chipset = "ALi M1651"; break;
-#endif
-
-#if LINUX_VERSION_CODE >= 0x020406
- case SVWRKS_HE: head->chipset = "Serverworks HE";
- break;
- case SVWRKS_LE: head->chipset = "Serverworks LE";
- break;
- case SVWRKS_GENERIC: head->chipset = "Serverworks Generic";
- break;
-#endif
-
- default: head->chipset = "Unknown"; break;
- }
#if LINUX_VERSION_CODE <= 0x020408
head->cant_use_aperture = 0;
head->page_mask = ~(0xfff);
@@ -321,13 +267,12 @@ drm_agp_head_t *DRM(agp_init)(void)
head->cant_use_aperture = head->agp_info.cant_use_aperture;
head->page_mask = head->agp_info.page_mask;
#endif
-
- DRM_INFO("AGP %d.%d on %s @ 0x%08lx %ZuMB\n",
- head->agp_info.version.major,
- head->agp_info.version.minor,
- head->chipset,
- head->agp_info.aper_base,
- head->agp_info.aper_size);
+
+ DRM_DEBUG("AGP %d.%d, aperture @ 0x%08lx %ZuMB\n",
+ head->agp_info.version.major,
+ head->agp_info.version.minor,
+ head->agp_info.aper_base,
+ head->agp_info.aper_size);
}
return head;
}
diff --git a/linux-core/drm_dma.c b/linux-core/drm_dma.c
index 46393a5db..33af34be2 100644
--- a/linux-core/drm_dma.c
+++ b/linux-core/drm_dma.c
@@ -544,6 +544,8 @@ int DRM(irq_install)( drm_device_t *dev, int irq )
spin_lock_init( &dev->vbl_lock );
INIT_LIST_HEAD( &dev->vbl_sigs.head );
+
+ dev->vbl_pending = 0;
#endif
/* Before installing handler */
@@ -636,10 +638,38 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
if ( flags & _DRM_VBLANK_SIGNAL ) {
unsigned long irqflags;
- drm_vbl_sig_t *vbl_sig = DRM_MALLOC( sizeof( drm_vbl_sig_t ) );
+ drm_vbl_sig_t *vbl_sig;
+
+ vblwait.reply.sequence = atomic_read( &dev->vbl_received );
+
+ spin_lock_irqsave( &dev->vbl_lock, irqflags );
+
+ /* Check if this task has already scheduled the same signal
+ * for the same vblank sequence number; nothing to be done in
+ * that case
+ */
+ list_for_each( ( (struct list_head *) vbl_sig ), &dev->vbl_sigs.head ) {
+ if (vbl_sig->sequence == vblwait.request.sequence
+ && vbl_sig->info.si_signo == vblwait.request.signal
+ && vbl_sig->task == current)
+ {
+ spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
+ goto done;
+ }
+ }
+
+ if ( dev->vbl_pending >= 100 ) {
+ spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
+ return -EBUSY;
+ }
+
+ dev->vbl_pending++;
- if ( !vbl_sig )
+ spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
+
+ if ( !( vbl_sig = DRM_MALLOC( sizeof( drm_vbl_sig_t ) ) ) ) {
return -ENOMEM;
+ }
memset( (void *)vbl_sig, 0, sizeof(*vbl_sig) );
@@ -647,9 +677,6 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
vbl_sig->info.si_signo = vblwait.request.signal;
vbl_sig->task = current;
- vblwait.reply.sequence = atomic_read( &dev->vbl_received );
-
- /* Hook signal entry into list */
spin_lock_irqsave( &dev->vbl_lock, irqflags );
list_add_tail( (struct list_head *) vbl_sig, &dev->vbl_sigs.head );
@@ -663,6 +690,7 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
vblwait.reply.tval_usec = now.tv_usec;
}
+done:
DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait,
sizeof(vblwait) );
@@ -671,25 +699,23 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
void DRM(vbl_send_signals)( drm_device_t *dev )
{
- struct list_head *entry, *tmp;
+ struct list_head *tmp;
drm_vbl_sig_t *vbl_sig;
unsigned int vbl_seq = atomic_read( &dev->vbl_received );
unsigned long flags;
spin_lock_irqsave( &dev->vbl_lock, flags );
- list_for_each_safe( entry, tmp, &dev->vbl_sigs.head ) {
-
- vbl_sig = (drm_vbl_sig_t *) entry;
-
+ list_for_each_safe( ( (struct list_head *) vbl_sig ), tmp, &dev->vbl_sigs.head ) {
if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) {
-
- vbl_sig->info.si_code = atomic_read( &dev->vbl_received );
+ vbl_sig->info.si_code = vbl_seq;
send_sig_info( vbl_sig->info.si_signo, &vbl_sig->info, vbl_sig->task );
- list_del( entry );
+ list_del( (struct list_head *) vbl_sig );
+
+ DRM_FREE( vbl_sig );
- DRM_FREE( entry );
+ dev->vbl_pending--;
}
}
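
The wait_vblank/vbl_send_signals changes above also cap outstanding signals at 100 per device via vbl_pending, and they cast a drm_vbl_sig_t pointer straight to struct list_head * for the list walks. Those casts are only sound if drm_vbl_sig_t keeps its list_head as the first member, so the two pointers alias; a standalone sketch of that layout assumption (demo type names, not the driver's):

#include <assert.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

/* Mirrors the layout the casts rely on: if the list head were not the
 * first member, casting the containing struct to struct list_head *
 * would point into the middle of it and corrupt the list. */
struct demo_vbl_sig {
	struct list_head head;   /* must stay first */
	unsigned int sequence;
};

int main(void)
{
	assert(offsetof(struct demo_vbl_sig, head) == 0);
	return 0;
}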
diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c
index 24e8556fc..d29db7b7b 100644
--- a/linux-core/drm_proc.c
+++ b/linux-core/drm_proc.c
@@ -449,7 +449,8 @@ static int DRM(_vma_info)(char *buf, char **start, off_t offset, int request,
for (i = vma->vm_start; i < vma->vm_end; i += PAGE_SIZE) {
pgd = pgd_offset(vma->vm_mm, i);
pmd = pmd_offset(pgd, i);
- pte = pte_offset(pmd, i);
+ preempt_disable();
+ pte = pte_offset_map(pmd, i);
if (pte_present(*pte)) {
address = __pa(pte_page(*pte))
+ (i & (PAGE_SIZE-1));
@@ -465,6 +466,8 @@ static int DRM(_vma_info)(char *buf, char **start, off_t offset, int request,
} else {
DRM_PROC_PRINT(" 0x%08lx\n", i);
}
+ pte_unmap(pte);
+ preempt_enable();
}
#endif
}
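
The pte_offset() replacement above is presumably needed because 2.5-era kernels can keep page tables in highmem: pte_offset_map() establishes a short-lived atomic mapping that must be released with pte_unmap(), and the surrounding preempt_disable()/preempt_enable() pair keeps the task from being preempted while that mapping is live.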
diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c
index 13f5f64fb..ffb7c7086 100644
--- a/linux-core/i810_dma.c
+++ b/linux-core/i810_dma.c
@@ -38,6 +38,7 @@
#include "i810_drv.h"
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
+#include <linux/pagemap.h>
#ifdef DO_MUNMAP_4_ARGS
#define DO_MUNMAP(m, a, l) do_munmap(m, a, l, 1)
@@ -1184,7 +1185,8 @@ int i810_ov0_info(struct inode *inode, struct file *filp,
data.offset = dev_priv->overlay_offset;
data.physical = dev_priv->overlay_physical;
- copy_to_user((drm_i810_overlay_t *)arg,&data,sizeof(data));
+ if (copy_to_user((drm_i810_overlay_t *)arg,&data,sizeof(data)))
+ return -EFAULT;
return 0;
}
diff --git a/linux/drm.h b/linux/drm.h
index d3c9f1580..d1d669437 100644
--- a/linux/drm.h
+++ b/linux/drm.h
@@ -348,7 +348,7 @@ typedef struct drm_irq_busid {
typedef enum {
_DRM_VBLANK_ABSOLUTE = 0x0, /* Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1, /* Wait for given number of vblanks */
- _DRM_VBLANK_SIGNAL = 0x80000000 /* Send signal instead of blocking */
+ _DRM_VBLANK_SIGNAL = 0x40000000 /* Send signal instead of blocking */
} drm_vblank_seq_type_t;
#define _DRM_VBLANK_FLAGS_MASK _DRM_VBLANK_SIGNAL
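
The flag moves from 0x80000000 to 0x40000000 presumably because C requires enumeration constants to be representable as a signed int; 0x80000000 overflows INT_MAX on a 32-bit int, whereas 0x40000000 is the highest single flag bit that still fits.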
diff --git a/linux/drmP.h b/linux/drmP.h
index 3f51b9b00..010450370 100644
--- a/linux/drmP.h
+++ b/linux/drmP.h
@@ -488,7 +488,6 @@ typedef struct drm_agp_mem {
typedef struct drm_agp_head {
agp_kern_info agp_info;
- const char *chipset;
drm_agp_mem_t *memory;
unsigned long mode;
int enabled;
@@ -593,6 +592,7 @@ typedef struct drm_device {
atomic_t vbl_received;
spinlock_t vbl_lock;
drm_vbl_sig_t vbl_sigs;
+ unsigned int vbl_pending;
#endif
cycles_t ctx_start;
cycles_t lck_start;
diff --git a/linux/drm_agpsupport.h b/linux/drm_agpsupport.h
index cd46110c4..6d6b5911f 100644
--- a/linux/drm_agpsupport.h
+++ b/linux/drm_agpsupport.h
@@ -260,60 +260,6 @@ drm_agp_head_t *DRM(agp_init)(void)
return NULL;
}
head->memory = NULL;
- switch (head->agp_info.chipset) {
- case INTEL_GENERIC: head->chipset = "Intel"; break;
- case INTEL_LX: head->chipset = "Intel 440LX"; break;
- case INTEL_BX: head->chipset = "Intel 440BX"; break;
- case INTEL_GX: head->chipset = "Intel 440GX"; break;
- case INTEL_I810: head->chipset = "Intel i810"; break;
-
- case INTEL_I815: head->chipset = "Intel i815"; break;
-#if LINUX_VERSION_CODE >= 0x02040f /* KERNEL_VERSION(2,4,15) */
- case INTEL_I820: head->chipset = "Intel i820"; break;
-#endif
- case INTEL_I840: head->chipset = "Intel i840"; break;
-#if LINUX_VERSION_CODE >= 0x02040f /* KERNEL_VERSION(2,4,15) */
- case INTEL_I845: head->chipset = "Intel i845"; break;
-#endif
- case INTEL_I850: head->chipset = "Intel i850"; break;
-
- case VIA_GENERIC: head->chipset = "VIA"; break;
- case VIA_VP3: head->chipset = "VIA VP3"; break;
- case VIA_MVP3: head->chipset = "VIA MVP3"; break;
- case VIA_MVP4: head->chipset = "VIA MVP4"; break;
- case VIA_APOLLO_KX133: head->chipset = "VIA Apollo KX133";
- break;
- case VIA_APOLLO_KT133: head->chipset = "VIA Apollo KT133";
- break;
-
- case VIA_APOLLO_PRO: head->chipset = "VIA Apollo Pro";
- break;
- case SIS_GENERIC: head->chipset = "SiS"; break;
- case AMD_GENERIC: head->chipset = "AMD"; break;
- case AMD_IRONGATE: head->chipset = "AMD Irongate"; break;
- case ALI_GENERIC: head->chipset = "ALi"; break;
- case ALI_M1541: head->chipset = "ALi M1541"; break;
-
-#if LINUX_VERSION_CODE >= 0x020402
- case ALI_M1621: head->chipset = "ALi M1621"; break;
- case ALI_M1631: head->chipset = "ALi M1631"; break;
- case ALI_M1632: head->chipset = "ALi M1632"; break;
- case ALI_M1641: head->chipset = "ALi M1641"; break;
- case ALI_M1647: head->chipset = "ALi M1647"; break;
- case ALI_M1651: head->chipset = "ALi M1651"; break;
-#endif
-
-#if LINUX_VERSION_CODE >= 0x020406
- case SVWRKS_HE: head->chipset = "Serverworks HE";
- break;
- case SVWRKS_LE: head->chipset = "Serverworks LE";
- break;
- case SVWRKS_GENERIC: head->chipset = "Serverworks Generic";
- break;
-#endif
-
- default: head->chipset = "Unknown"; break;
- }
#if LINUX_VERSION_CODE <= 0x020408
head->cant_use_aperture = 0;
head->page_mask = ~(0xfff);
@@ -321,13 +267,12 @@ drm_agp_head_t *DRM(agp_init)(void)
head->cant_use_aperture = head->agp_info.cant_use_aperture;
head->page_mask = head->agp_info.page_mask;
#endif
-
- DRM_INFO("AGP %d.%d on %s @ 0x%08lx %ZuMB\n",
- head->agp_info.version.major,
- head->agp_info.version.minor,
- head->chipset,
- head->agp_info.aper_base,
- head->agp_info.aper_size);
+
+ DRM_DEBUG("AGP %d.%d, aperture @ 0x%08lx %ZuMB\n",
+ head->agp_info.version.major,
+ head->agp_info.version.minor,
+ head->agp_info.aper_base,
+ head->agp_info.aper_size);
}
return head;
}
diff --git a/linux/drm_dma.h b/linux/drm_dma.h
index 46393a5db..33af34be2 100644
--- a/linux/drm_dma.h
+++ b/linux/drm_dma.h
@@ -544,6 +544,8 @@ int DRM(irq_install)( drm_device_t *dev, int irq )
spin_lock_init( &dev->vbl_lock );
INIT_LIST_HEAD( &dev->vbl_sigs.head );
+
+ dev->vbl_pending = 0;
#endif
/* Before installing handler */
@@ -636,10 +638,38 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
if ( flags & _DRM_VBLANK_SIGNAL ) {
unsigned long irqflags;
- drm_vbl_sig_t *vbl_sig = DRM_MALLOC( sizeof( drm_vbl_sig_t ) );
+ drm_vbl_sig_t *vbl_sig;
+
+ vblwait.reply.sequence = atomic_read( &dev->vbl_received );
+
+ spin_lock_irqsave( &dev->vbl_lock, irqflags );
+
+ /* Check if this task has already scheduled the same signal
+ * for the same vblank sequence number; nothing to be done in
+ * that case
+ */
+ list_for_each( ( (struct list_head *) vbl_sig ), &dev->vbl_sigs.head ) {
+ if (vbl_sig->sequence == vblwait.request.sequence
+ && vbl_sig->info.si_signo == vblwait.request.signal
+ && vbl_sig->task == current)
+ {
+ spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
+ goto done;
+ }
+ }
+
+ if ( dev->vbl_pending >= 100 ) {
+ spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
+ return -EBUSY;
+ }
+
+ dev->vbl_pending++;
- if ( !vbl_sig )
+ spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
+
+ if ( !( vbl_sig = DRM_MALLOC( sizeof( drm_vbl_sig_t ) ) ) ) {
return -ENOMEM;
+ }
memset( (void *)vbl_sig, 0, sizeof(*vbl_sig) );
@@ -647,9 +677,6 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
vbl_sig->info.si_signo = vblwait.request.signal;
vbl_sig->task = current;
- vblwait.reply.sequence = atomic_read( &dev->vbl_received );
-
- /* Hook signal entry into list */
spin_lock_irqsave( &dev->vbl_lock, irqflags );
list_add_tail( (struct list_head *) vbl_sig, &dev->vbl_sigs.head );
@@ -663,6 +690,7 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
vblwait.reply.tval_usec = now.tv_usec;
}
+done:
DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait,
sizeof(vblwait) );
@@ -671,25 +699,23 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
void DRM(vbl_send_signals)( drm_device_t *dev )
{
- struct list_head *entry, *tmp;
+ struct list_head *tmp;
drm_vbl_sig_t *vbl_sig;
unsigned int vbl_seq = atomic_read( &dev->vbl_received );
unsigned long flags;
spin_lock_irqsave( &dev->vbl_lock, flags );
- list_for_each_safe( entry, tmp, &dev->vbl_sigs.head ) {
-
- vbl_sig = (drm_vbl_sig_t *) entry;
-
+ list_for_each_safe( ( (struct list_head *) vbl_sig ), tmp, &dev->vbl_sigs.head ) {
if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) {
-
- vbl_sig->info.si_code = atomic_read( &dev->vbl_received );
+ vbl_sig->info.si_code = vbl_seq;
send_sig_info( vbl_sig->info.si_signo, &vbl_sig->info, vbl_sig->task );
- list_del( entry );
+ list_del( (struct list_head *) vbl_sig );
+
+ DRM_FREE( vbl_sig );
- DRM_FREE( entry );
+ dev->vbl_pending--;
}
}
diff --git a/linux/drm_proc.h b/linux/drm_proc.h
index 24e8556fc..d29db7b7b 100644
--- a/linux/drm_proc.h
+++ b/linux/drm_proc.h
@@ -449,7 +449,8 @@ static int DRM(_vma_info)(char *buf, char **start, off_t offset, int request,
for (i = vma->vm_start; i < vma->vm_end; i += PAGE_SIZE) {
pgd = pgd_offset(vma->vm_mm, i);
pmd = pmd_offset(pgd, i);
- pte = pte_offset(pmd, i);
+ preempt_disable();
+ pte = pte_offset_map(pmd, i);
if (pte_present(*pte)) {
address = __pa(pte_page(*pte))
+ (i & (PAGE_SIZE-1));
@@ -465,6 +466,8 @@ static int DRM(_vma_info)(char *buf, char **start, off_t offset, int request,
} else {
DRM_PROC_PRINT(" 0x%08lx\n", i);
}
+ pte_unmap(pte);
+ preempt_enable();
}
#endif
}
diff --git a/linux/i810_dma.c b/linux/i810_dma.c
index 13f5f64fb..ffb7c7086 100644
--- a/linux/i810_dma.c
+++ b/linux/i810_dma.c
@@ -38,6 +38,7 @@
#include "i810_drv.h"
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
+#include <linux/pagemap.h>
#ifdef DO_MUNMAP_4_ARGS
#define DO_MUNMAP(m, a, l) do_munmap(m, a, l, 1)
@@ -1184,7 +1185,8 @@ int i810_ov0_info(struct inode *inode, struct file *filp,
data.offset = dev_priv->overlay_offset;
data.physical = dev_priv->overlay_physical;
- copy_to_user((drm_i810_overlay_t *)arg,&data,sizeof(data));
+ if (copy_to_user((drm_i810_overlay_t *)arg,&data,sizeof(data)))
+ return -EFAULT;
return 0;
}
diff --git a/linux/sis_ds.c b/linux/sis_ds.c
index 95880a482..f55cf6ab4 100644
--- a/linux/sis_ds.c
+++ b/linux/sis_ds.c
@@ -50,15 +50,16 @@ set_t *setInit(void)
set_t *set;
set = (set_t *)MALLOC(sizeof(set_t));
- for(i = 0; i < SET_SIZE; i++){
- set->list[i].free_next = i+1;
- set->list[i].alloc_next = -1;
- }
- set->list[SET_SIZE-1].free_next = -1;
- set->free = 0;
- set->alloc = -1;
- set->trace = -1;
-
+ if (set) {
+ for(i = 0; i < SET_SIZE; i++){
+ set->list[i].free_next = i+1;
+ set->list[i].alloc_next = -1;
+ }
+ set->list[SET_SIZE-1].free_next = -1;
+ set->free = 0;
+ set->alloc = -1;
+ set->trace = -1;
+ }
return set;
}
@@ -172,7 +173,8 @@ static void *calloc(size_t nmemb, size_t size)
{
void *addr;
addr = kmalloc(nmemb*size, GFP_KERNEL);
- memset(addr, 0, nmemb*size);
+ if (addr)
+ memset(addr, 0, nmemb*size);
return addr;
}
#define free(n) kfree(n)
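
With the guard above, setInit() returns NULL when its MALLOC fails instead of writing through a null pointer, so callers are expected to check its result; likewise the local calloc() wrapper now skips the memset() when kmalloc() returns NULL.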
diff --git a/shared-core/drm.h b/shared-core/drm.h
index d3c9f1580..d1d669437 100644
--- a/shared-core/drm.h
+++ b/shared-core/drm.h
@@ -348,7 +348,7 @@ typedef struct drm_irq_busid {
typedef enum {
_DRM_VBLANK_ABSOLUTE = 0x0, /* Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1, /* Wait for given number of vblanks */
- _DRM_VBLANK_SIGNAL = 0x80000000 /* Send signal instead of blocking */
+ _DRM_VBLANK_SIGNAL = 0x40000000 /* Send signal instead of blocking */
} drm_vblank_seq_type_t;
#define _DRM_VBLANK_FLAGS_MASK _DRM_VBLANK_SIGNAL
diff --git a/shared-core/radeon_cp.c b/shared-core/radeon_cp.c
index 7c869c029..6994fe9eb 100644
--- a/shared-core/radeon_cp.c
+++ b/shared-core/radeon_cp.c
@@ -1354,6 +1354,9 @@ int radeon_cp_stop( DRM_IOCTL_ARGS )
DRM_COPY_FROM_USER_IOCTL( stop, (drm_radeon_cp_stop_t *)data, sizeof(stop) );
+ if (!dev_priv->cp_running)
+ return 0;
+
/* Flush any pending CP commands. This ensures any outstanding
 * commands are executed by the engine before we turn it off.
*/
@@ -1381,6 +1384,39 @@ int radeon_cp_stop( DRM_IOCTL_ARGS )
return 0;
}
+
+void radeon_do_release( drm_device_t *dev )
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ if (dev_priv) {
+ if (dev_priv->cp_running) {
+ /* Stop the cp */
+ while ((ret = radeon_do_cp_idle( dev_priv )) != 0) {
+ DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
+#ifdef __linux__
+ schedule();
+#else
+ tsleep(&ret, PZERO, "rdnrel", 1);
+#endif
+ }
+ radeon_do_cp_stop( dev_priv );
+ radeon_do_engine_reset( dev );
+ }
+
+ /* Disable *all* interrupts */
+ RADEON_WRITE( RADEON_GEN_INT_CNTL, 0 );
+
+ /* Free memory heap structures */
+ radeon_mem_takedown( &(dev_priv->agp_heap) );
+ radeon_mem_takedown( &(dev_priv->fb_heap) );
+
+ /* deallocate kernel resources */
+ radeon_do_cleanup_cp( dev );
+ }
+}
+
/* Just reset the CP ring. Called as part of an X Server engine reset.
*/
int radeon_cp_reset( DRM_IOCTL_ARGS )
@@ -1412,9 +1448,6 @@ int radeon_cp_idle( DRM_IOCTL_ARGS )
LOCK_TEST_WITH_RETURN( dev );
-/* if (dev->irq) */
-/* radeon_emit_and_wait_irq( dev ); */
-
return radeon_do_cp_idle( dev_priv );
}
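
Note the teardown ordering in radeon_do_release() above: the CP is idled first (yielding via schedule() or tsleep() while the engine drains) and stopped before the engine reset, interrupts are disabled in RADEON_GEN_INT_CNTL before the heaps are torn down, and radeon_do_cleanup_cp() frees the kernel resources last, so no interrupt or in-flight command can touch freed state.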
diff --git a/shared-core/radeon_drv.h b/shared-core/radeon_drv.h
index 65f3c9265..22c5d04fa 100644
--- a/shared-core/radeon_drv.h
+++ b/shared-core/radeon_drv.h
@@ -193,6 +193,7 @@ extern int radeon_emit_and_wait_irq(drm_device_t *dev);
extern int radeon_wait_irq(drm_device_t *dev, int swi_nr);
extern int radeon_emit_irq(drm_device_t *dev);
+extern void radeon_do_release(drm_device_t *dev);
/* Flags for stats.boxes
*/
diff --git a/shared-core/radeon_mem.c b/shared-core/radeon_mem.c
index 5c07c1afe..7ca10753a 100644
--- a/shared-core/radeon_mem.c
+++ b/shared-core/radeon_mem.c
@@ -130,16 +130,6 @@ static void free_block( struct mem_block *p )
}
}
-static void print_heap( struct mem_block *heap )
-{
- struct mem_block *p;
-
- for (p = heap->next ; p != heap ; p = p->next)
- DRM_DEBUG("0x%x..0x%x (0x%x) -- owner %d\n",
- p->start, p->start + p->size,
- p->size, p->pid);
-}
-
/* Initialize. How to check for an uninitialized heap?
*/
static int init_heap(struct mem_block **heap, int start, int size)
diff --git a/shared-core/radeon_state.c b/shared-core/radeon_state.c
index 7b480a7e9..1fe007b30 100644
--- a/shared-core/radeon_state.c
+++ b/shared-core/radeon_state.c
@@ -1074,19 +1074,30 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
const u8 *data;
int size, dwords, tex_width, blit_width;
u32 y, height;
- int ret = 0, i;
+ int i;
RING_LOCALS;
dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;
- /* FIXME: Be smarter about this...
+ /* Flush the pixel cache. This ensures no pixel data gets mixed
+ * up with the texture data from the host data blit, otherwise
+ * part of the texture image may be corrupted.
*/
- buf = radeon_freelist_get( dev );
- if ( !buf ) return DRM_ERR(EAGAIN);
+ BEGIN_RING( 4 );
+ RADEON_FLUSH_CACHE();
+ RADEON_WAIT_UNTIL_IDLE();
+ ADVANCE_RING();
+
+#ifdef __BIG_ENDIAN
+ /* The Mesa texture functions provide the data in little endian as the
+ * chip wants it, but we need to compensate for the fact that the CP
+ * ring gets byte-swapped
+ */
+ BEGIN_RING( 2 );
+ OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
+ ADVANCE_RING();
+#endif
- DRM_DEBUG( "tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
- tex->offset >> 10, tex->pitch, tex->format,
- image->x, image->y, image->width, image->height );
/* The compiler won't optimize away a division by a variable,
* even if the only legal values are powers of two. Thus, we'll
@@ -1120,127 +1131,113 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
return DRM_ERR(EINVAL);
}
- DRM_DEBUG( " tex=%dx%d blit=%d\n",
- tex_width, tex->height, blit_width );
-
- /* Flush the pixel cache. This ensures no pixel data gets mixed
- * up with the texture data from the host data blit, otherwise
- * part of the texture image may be corrupted.
- */
- BEGIN_RING( 4 );
-
- RADEON_FLUSH_CACHE();
- RADEON_WAIT_UNTIL_IDLE();
-
- ADVANCE_RING();
-
-#ifdef __BIG_ENDIAN
- /* The Mesa texture functions provide the data in little endian as the
- * chip wants it, but we need to compensate for the fact that the CP
- * ring gets byte-swapped
- */
- BEGIN_RING( 2 );
- OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
- ADVANCE_RING();
-#endif
-
- /* Make a copy of the parameters in case we have to update them
- * for a multi-pass texture blit.
- */
- y = image->y;
- height = image->height;
- data = (const u8 *)image->data;
-
- size = height * blit_width;
+ DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width );
- if ( size > RADEON_MAX_TEXTURE_SIZE ) {
- /* Texture image is too large, do a multipass upload */
- ret = DRM_ERR(EAGAIN);
+ do {
+ DRM_DEBUG( "tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
+ tex->offset >> 10, tex->pitch, tex->format,
+ image->x, image->y, image->width, image->height );
- /* Adjust the blit size to fit the indirect buffer */
- height = RADEON_MAX_TEXTURE_SIZE / blit_width;
+ /* Make a copy of the parameters in case we have to
+ * update them for a multi-pass texture blit.
+ */
+ y = image->y;
+ height = image->height;
+ data = (const u8 *)image->data;
+
size = height * blit_width;
+ if ( size > RADEON_MAX_TEXTURE_SIZE ) {
+ height = RADEON_MAX_TEXTURE_SIZE / blit_width;
+ size = height * blit_width;
+ } else if ( size < 4 && size > 0 ) {
+ size = 4;
+ } else if ( size == 0 ) {
+ return 0;
+ }
+
/* Update the input parameters for next time */
image->y += height;
image->height -= height;
- image->data = (const char *)image->data + size;
+ image->data += size;
- if ( DRM_COPY_TO_USER( tex->image, image, sizeof(*image) ) ) {
- DRM_ERROR( "EFAULT on tex->image\n" );
- return DRM_ERR(EFAULT);
+ buf = radeon_freelist_get( dev );
+ if ( 0 && !buf ) {
+ radeon_do_cp_idle( dev_priv );
+ buf = radeon_freelist_get( dev );
+ }
+ if ( !buf ) {
+ DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n");
+ DRM_COPY_TO_USER( tex->image, image, sizeof(*image) );
+ return DRM_ERR(EAGAIN);
}
- } else if ( size < 4 && size > 0 ) {
- size = 4;
- }
- dwords = size / 4;
- /* Dispatch the indirect buffer.
- */
- buffer = (u32 *)((char *)dev_priv->buffers->handle + buf->offset);
-
- buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
- buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
- RADEON_GMC_BRUSH_NONE |
- (format << 8) |
- RADEON_GMC_SRC_DATATYPE_COLOR |
- RADEON_ROP3_S |
- RADEON_DP_SRC_SOURCE_HOST_DATA |
- RADEON_GMC_CLR_CMP_CNTL_DIS |
- RADEON_GMC_WR_MSK_DIS);
-
- buffer[2] = (tex->pitch << 22) | (tex->offset >> 10);
- buffer[3] = 0xffffffff;
- buffer[4] = 0xffffffff;
- buffer[5] = (y << 16) | image->x;
- buffer[6] = (height << 16) | image->width;
- buffer[7] = dwords;
-
- buffer += 8;
-
- if ( tex_width >= 32 ) {
- /* Texture image width is larger than the minimum, so we
- * can upload it directly.
- */
- if ( DRM_COPY_FROM_USER( buffer, data, dwords * sizeof(u32) ) ) {
- DRM_ERROR( "EFAULT on data, %d dwords\n", dwords );
- return DRM_ERR(EFAULT);
- }
- } else {
- /* Texture image width is less than the minimum, so we
- * need to pad out each image scanline to the minimum
- * width.
+ /* Dispatch the indirect buffer.
*/
- for ( i = 0 ; i < tex->height ; i++ ) {
- if ( DRM_COPY_FROM_USER( buffer, data, tex_width ) ) {
- DRM_ERROR( "EFAULT on pad, %d bytes\n",
- tex_width );
+ buffer = (u32*)((char*)dev_priv->buffers->handle + buf->offset);
+ dwords = size / 4;
+ buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
+ buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ RADEON_GMC_BRUSH_NONE |
+ (format << 8) |
+ RADEON_GMC_SRC_DATATYPE_COLOR |
+ RADEON_ROP3_S |
+ RADEON_DP_SRC_SOURCE_HOST_DATA |
+ RADEON_GMC_CLR_CMP_CNTL_DIS |
+ RADEON_GMC_WR_MSK_DIS);
+
+ buffer[2] = (tex->pitch << 22) | (tex->offset >> 10);
+ buffer[3] = 0xffffffff;
+ buffer[4] = 0xffffffff;
+ buffer[5] = (y << 16) | image->x;
+ buffer[6] = (height << 16) | image->width;
+ buffer[7] = dwords;
+ buffer += 8;
+
+ if ( tex_width >= 32 ) {
+ /* Texture image width is larger than the minimum, so we
+ * can upload it directly.
+ */
+ if ( DRM_COPY_FROM_USER( buffer, data,
+ dwords * sizeof(u32) ) ) {
+ DRM_ERROR( "EFAULT on data, %d dwords\n",
+ dwords );
return DRM_ERR(EFAULT);
}
- buffer += 8;
- data += tex_width;
+ } else {
+ /* Texture image width is less than the minimum, so we
+ * need to pad out each image scanline to the minimum
+ * width.
+ */
+ for ( i = 0 ; i < tex->height ; i++ ) {
+ if ( DRM_COPY_FROM_USER( buffer, data,
+ tex_width ) ) {
+ DRM_ERROR( "EFAULT on pad, %d bytes\n",
+ tex_width );
+ return DRM_ERR(EFAULT);
+ }
+ buffer += 8;
+ data += tex_width;
+ }
}
- }
- buf->pid = DRM_CURRENTPID;
- buf->used = (dwords + 8) * sizeof(u32);
+ buf->pid = DRM_CURRENTPID;
+ buf->used = (dwords + 8) * sizeof(u32);
+ radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
+ radeon_cp_discard_buffer( dev, buf );
- radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
- radeon_cp_discard_buffer( dev, buf );
+ } while (image->height > 0);
/* Flush the pixel cache after the blit completes. This ensures
* the texture data is written out to memory before rendering
* continues.
*/
BEGIN_RING( 4 );
-
RADEON_FLUSH_CACHE();
RADEON_WAIT_UNTIL_2D_IDLE();
-
ADVANCE_RING();
-
- return ret;
+ return 0;
}
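
The restructured upload above moves the multipass logic into the kernel: each pass clamps one blit to RADEON_MAX_TEXTURE_SIZE, advances image->y, image->height, and image->data past the rows it consumed, and returns EAGAIN (with the updated parameters copied back to userspace) only when no indirect buffer is available. A standalone sketch of the clamp-and-advance split (stand-in constant and sizes, not driver code):

#include <stdio.h>

#define MAX_PASS_BYTES 8192u  /* stand-in for RADEON_MAX_TEXTURE_SIZE */

int main(void)
{
	unsigned int height = 100u, blit_width = 1024u, y = 0u;

	/* Consume the image in clamped passes, as the do/while above does. */
	while (height > 0u) {
		unsigned int pass = height;

		if (pass * blit_width > MAX_PASS_BYTES)
			pass = MAX_PASS_BYTES / blit_width;
		printf("pass: rows %u..%u (%u bytes)\n",
		       y, y + pass - 1u, pass * blit_width);
		y += pass;
		height -= pass;
	}
	return 0;
}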
diff --git a/shared/drm.h b/shared/drm.h
index d3c9f1580..d1d669437 100644
--- a/shared/drm.h
+++ b/shared/drm.h
@@ -348,7 +348,7 @@ typedef struct drm_irq_busid {
typedef enum {
_DRM_VBLANK_ABSOLUTE = 0x0, /* Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1, /* Wait for given number of vblanks */
- _DRM_VBLANK_SIGNAL = 0x80000000 /* Send signal instead of blocking */
+ _DRM_VBLANK_SIGNAL = 0x40000000 /* Send signal instead of blocking */
} drm_vblank_seq_type_t;
#define _DRM_VBLANK_FLAGS_MASK _DRM_VBLANK_SIGNAL
diff --git a/shared/radeon.h b/shared/radeon.h
index fe71687a1..2b5131a9f 100644
--- a/shared/radeon.h
+++ b/shared/radeon.h
@@ -51,7 +51,7 @@
#define DRIVER_DATE "20020828"
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 7
+#define DRIVER_MINOR 8
#define DRIVER_PATCHLEVEL 0
/* Interface history:
@@ -77,6 +77,7 @@
* and R200_PP_CUBIC_OFFSET_F1_[0..5].
* Added packets R200_EMIT_PP_CUBIC_FACES_[0..5] and
* R200_EMIT_PP_CUBIC_OFFSETS_[0..5]. (brian)
+ * 1.8 - Remove need to call cleanup ioctls on last client exit (keith)
*/
#define DRIVER_IOCTLS \
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 }, \
@@ -105,11 +106,6 @@
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_IRQ_WAIT)] = { radeon_irq_wait, 1, 0 },
-#define USE_IRQS 1
-#if USE_IRQS
-#define __HAVE_DMA_IRQ 1
-#define __HAVE_VBL_IRQ 1
-#define __HAVE_SHARED_IRQ 1
/* When a client dies:
* - Check for and clean up flipped page state
@@ -117,35 +113,35 @@
*
* DRM infrastructure takes care of reclaiming dma buffers.
*/
-#define DRIVER_PRERELEASE() do { \
+#define DRIVER_PRERELEASE() \
+do { \
if ( dev->dev_private ) { \
drm_radeon_private_t *dev_priv = dev->dev_private; \
if ( dev_priv->page_flipping ) { \
radeon_do_cleanup_pageflip( dev ); \
} \
radeon_mem_release( dev_priv->agp_heap ); \
+ radeon_mem_release( dev_priv->fb_heap ); \
} \
} while (0)
-/* On unloading the module:
- * - Free memory heap structure
- * - Remove mappings made at startup and free dev_private.
+/* When the last client dies, shut down the CP and free dev->dev_priv.
*/
-#define DRIVER_PRETAKEDOWN() do { \
- if ( dev->dev_private ) { \
- drm_radeon_private_t *dev_priv = dev->dev_private; \
- radeon_mem_takedown( &(dev_priv->agp_heap) ); \
- radeon_do_cleanup_cp( dev ); \
- } \
+#define __HAVE_RELEASE 1
+#define DRIVER_RELEASE() \
+do { \
+ if ( dev->open_count == 1) \
+ radeon_do_release( dev ); \
} while (0)
-#else
-#define __HAVE_DMA_IRQ 0
-#endif
+
/* DMA customization:
*/
#define __HAVE_DMA 1
+#define __HAVE_DMA_IRQ 1
+#define __HAVE_VBL_IRQ 1
+#define __HAVE_SHARED_IRQ 1
/* Buffer customization:
diff --git a/shared/radeon_cp.c b/shared/radeon_cp.c
index 7c869c029..6994fe9eb 100644
--- a/shared/radeon_cp.c
+++ b/shared/radeon_cp.c
@@ -1354,6 +1354,9 @@ int radeon_cp_stop( DRM_IOCTL_ARGS )
DRM_COPY_FROM_USER_IOCTL( stop, (drm_radeon_cp_stop_t *)data, sizeof(stop) );
+ if (!dev_priv->cp_running)
+ return 0;
+
/* Flush any pending CP commands. This ensures any outstanding
 * commands are executed by the engine before we turn it off.
*/
@@ -1381,6 +1384,39 @@ int radeon_cp_stop( DRM_IOCTL_ARGS )
return 0;
}
+
+void radeon_do_release( drm_device_t *dev )
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ if (dev_priv) {
+ if (dev_priv->cp_running) {
+ /* Stop the cp */
+ while ((ret = radeon_do_cp_idle( dev_priv )) != 0) {
+ DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
+#ifdef __linux__
+ schedule();
+#else
+ tsleep(&ret, PZERO, "rdnrel", 1);
+#endif
+ }
+ radeon_do_cp_stop( dev_priv );
+ radeon_do_engine_reset( dev );
+ }
+
+ /* Disable *all* interrupts */
+ RADEON_WRITE( RADEON_GEN_INT_CNTL, 0 );
+
+ /* Free memory heap structures */
+ radeon_mem_takedown( &(dev_priv->agp_heap) );
+ radeon_mem_takedown( &(dev_priv->fb_heap) );
+
+ /* deallocate kernel resources */
+ radeon_do_cleanup_cp( dev );
+ }
+}
+
/* Just reset the CP ring. Called as part of an X Server engine reset.
*/
int radeon_cp_reset( DRM_IOCTL_ARGS )
@@ -1412,9 +1448,6 @@ int radeon_cp_idle( DRM_IOCTL_ARGS )
LOCK_TEST_WITH_RETURN( dev );
-/* if (dev->irq) */
-/* radeon_emit_and_wait_irq( dev ); */
-
return radeon_do_cp_idle( dev_priv );
}
diff --git a/shared/radeon_drv.h b/shared/radeon_drv.h
index 65f3c9265..22c5d04fa 100644
--- a/shared/radeon_drv.h
+++ b/shared/radeon_drv.h
@@ -193,6 +193,7 @@ extern int radeon_emit_and_wait_irq(drm_device_t *dev);
extern int radeon_wait_irq(drm_device_t *dev, int swi_nr);
extern int radeon_emit_irq(drm_device_t *dev);
+extern void radeon_do_release(drm_device_t *dev);
/* Flags for stats.boxes
*/
diff --git a/shared/radeon_mem.c b/shared/radeon_mem.c
index 5c07c1afe..7ca10753a 100644
--- a/shared/radeon_mem.c
+++ b/shared/radeon_mem.c
@@ -130,16 +130,6 @@ static void free_block( struct mem_block *p )
}
}
-static void print_heap( struct mem_block *heap )
-{
- struct mem_block *p;
-
- for (p = heap->next ; p != heap ; p = p->next)
- DRM_DEBUG("0x%x..0x%x (0x%x) -- owner %d\n",
- p->start, p->start + p->size,
- p->size, p->pid);
-}
-
/* Initialize. How to check for an uninitialized heap?
*/
static int init_heap(struct mem_block **heap, int start, int size)
diff --git a/shared/radeon_state.c b/shared/radeon_state.c
index 7b480a7e9..1fe007b30 100644
--- a/shared/radeon_state.c
+++ b/shared/radeon_state.c
@@ -1074,19 +1074,30 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
const u8 *data;
int size, dwords, tex_width, blit_width;
u32 y, height;
- int ret = 0, i;
+ int i;
RING_LOCALS;
dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;
- /* FIXME: Be smarter about this...
+ /* Flush the pixel cache. This ensures no pixel data gets mixed
+ * up with the texture data from the host data blit, otherwise
+ * part of the texture image may be corrupted.
*/
- buf = radeon_freelist_get( dev );
- if ( !buf ) return DRM_ERR(EAGAIN);
+ BEGIN_RING( 4 );
+ RADEON_FLUSH_CACHE();
+ RADEON_WAIT_UNTIL_IDLE();
+ ADVANCE_RING();
+
+#ifdef __BIG_ENDIAN
+ /* The Mesa texture functions provide the data in little endian as the
+ * chip wants it, but we need to compensate for the fact that the CP
+ * ring gets byte-swapped
+ */
+ BEGIN_RING( 2 );
+ OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
+ ADVANCE_RING();
+#endif
- DRM_DEBUG( "tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
- tex->offset >> 10, tex->pitch, tex->format,
- image->x, image->y, image->width, image->height );
/* The compiler won't optimize away a division by a variable,
* even if the only legal values are powers of two. Thus, we'll
@@ -1120,127 +1131,113 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
return DRM_ERR(EINVAL);
}
- DRM_DEBUG( " tex=%dx%d blit=%d\n",
- tex_width, tex->height, blit_width );
-
- /* Flush the pixel cache. This ensures no pixel data gets mixed
- * up with the texture data from the host data blit, otherwise
- * part of the texture image may be corrupted.
- */
- BEGIN_RING( 4 );
-
- RADEON_FLUSH_CACHE();
- RADEON_WAIT_UNTIL_IDLE();
-
- ADVANCE_RING();
-
-#ifdef __BIG_ENDIAN
- /* The Mesa texture functions provide the data in little endian as the
- * chip wants it, but we need to compensate for the fact that the CP
- * ring gets byte-swapped
- */
- BEGIN_RING( 2 );
- OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
- ADVANCE_RING();
-#endif
-
- /* Make a copy of the parameters in case we have to update them
- * for a multi-pass texture blit.
- */
- y = image->y;
- height = image->height;
- data = (const u8 *)image->data;
-
- size = height * blit_width;
+ DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width );
- if ( size > RADEON_MAX_TEXTURE_SIZE ) {
- /* Texture image is too large, do a multipass upload */
- ret = DRM_ERR(EAGAIN);
+ do {
+ DRM_DEBUG( "tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
+ tex->offset >> 10, tex->pitch, tex->format,
+ image->x, image->y, image->width, image->height );
- /* Adjust the blit size to fit the indirect buffer */
- height = RADEON_MAX_TEXTURE_SIZE / blit_width;
+ /* Make a copy of the parameters in case we have to
+ * update them for a multi-pass texture blit.
+ */
+ y = image->y;
+ height = image->height;
+ data = (const u8 *)image->data;
+
size = height * blit_width;
+ if ( size > RADEON_MAX_TEXTURE_SIZE ) {
+ height = RADEON_MAX_TEXTURE_SIZE / blit_width;
+ size = height * blit_width;
+ } else if ( size < 4 && size > 0 ) {
+ size = 4;
+ } else if ( size == 0 ) {
+ return 0;
+ }
+
/* Update the input parameters for next time */
image->y += height;
image->height -= height;
- image->data = (const char *)image->data + size;
+ image->data += size;
- if ( DRM_COPY_TO_USER( tex->image, image, sizeof(*image) ) ) {
- DRM_ERROR( "EFAULT on tex->image\n" );
- return DRM_ERR(EFAULT);
+ buf = radeon_freelist_get( dev );
+ if ( 0 && !buf ) {
+ radeon_do_cp_idle( dev_priv );
+ buf = radeon_freelist_get( dev );
+ }
+ if ( !buf ) {
+ DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n");
+ DRM_COPY_TO_USER( tex->image, image, sizeof(*image) );
+ return DRM_ERR(EAGAIN);
}
- } else if ( size < 4 && size > 0 ) {
- size = 4;
- }
- dwords = size / 4;
- /* Dispatch the indirect buffer.
- */
- buffer = (u32 *)((char *)dev_priv->buffers->handle + buf->offset);
-
- buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
- buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
- RADEON_GMC_BRUSH_NONE |
- (format << 8) |
- RADEON_GMC_SRC_DATATYPE_COLOR |
- RADEON_ROP3_S |
- RADEON_DP_SRC_SOURCE_HOST_DATA |
- RADEON_GMC_CLR_CMP_CNTL_DIS |
- RADEON_GMC_WR_MSK_DIS);
-
- buffer[2] = (tex->pitch << 22) | (tex->offset >> 10);
- buffer[3] = 0xffffffff;
- buffer[4] = 0xffffffff;
- buffer[5] = (y << 16) | image->x;
- buffer[6] = (height << 16) | image->width;
- buffer[7] = dwords;
-
- buffer += 8;
-
- if ( tex_width >= 32 ) {
- /* Texture image width is larger than the minimum, so we
- * can upload it directly.
- */
- if ( DRM_COPY_FROM_USER( buffer, data, dwords * sizeof(u32) ) ) {
- DRM_ERROR( "EFAULT on data, %d dwords\n", dwords );
- return DRM_ERR(EFAULT);
- }
- } else {
- /* Texture image width is less than the minimum, so we
- * need to pad out each image scanline to the minimum
- * width.
+ /* Dispatch the indirect buffer.
*/
- for ( i = 0 ; i < tex->height ; i++ ) {
- if ( DRM_COPY_FROM_USER( buffer, data, tex_width ) ) {
- DRM_ERROR( "EFAULT on pad, %d bytes\n",
- tex_width );
+ buffer = (u32*)((char*)dev_priv->buffers->handle + buf->offset);
+ dwords = size / 4;
+ buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
+ buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ RADEON_GMC_BRUSH_NONE |
+ (format << 8) |
+ RADEON_GMC_SRC_DATATYPE_COLOR |
+ RADEON_ROP3_S |
+ RADEON_DP_SRC_SOURCE_HOST_DATA |
+ RADEON_GMC_CLR_CMP_CNTL_DIS |
+ RADEON_GMC_WR_MSK_DIS);
+
+ buffer[2] = (tex->pitch << 22) | (tex->offset >> 10);
+ buffer[3] = 0xffffffff;
+ buffer[4] = 0xffffffff;
+ buffer[5] = (y << 16) | image->x;
+ buffer[6] = (height << 16) | image->width;
+ buffer[7] = dwords;
+ buffer += 8;
+
+ if ( tex_width >= 32 ) {
+ /* Texture image width is larger than the minimum, so we
+ * can upload it directly.
+ */
+ if ( DRM_COPY_FROM_USER( buffer, data,
+ dwords * sizeof(u32) ) ) {
+ DRM_ERROR( "EFAULT on data, %d dwords\n",
+ dwords );
return DRM_ERR(EFAULT);
}
- buffer += 8;
- data += tex_width;
+ } else {
+ /* Texture image width is less than the minimum, so we
+ * need to pad out each image scanline to the minimum
+ * width.
+ */
+ for ( i = 0 ; i < tex->height ; i++ ) {
+ if ( DRM_COPY_FROM_USER( buffer, data,
+ tex_width ) ) {
+ DRM_ERROR( "EFAULT on pad, %d bytes\n",
+ tex_width );
+ return DRM_ERR(EFAULT);
+ }
+ buffer += 8;
+ data += tex_width;
+ }
}
- }
- buf->pid = DRM_CURRENTPID;
- buf->used = (dwords + 8) * sizeof(u32);
+ buf->pid = DRM_CURRENTPID;
+ buf->used = (dwords + 8) * sizeof(u32);
+ radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
+ radeon_cp_discard_buffer( dev, buf );
- radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
- radeon_cp_discard_buffer( dev, buf );
+ } while (image->height > 0);
/* Flush the pixel cache after the blit completes. This ensures
* the texture data is written out to memory before rendering
* continues.
*/
BEGIN_RING( 4 );
-
RADEON_FLUSH_CACHE();
RADEON_WAIT_UNTIL_2D_IDLE();
-
ADVANCE_RING();
-
- return ret;
+ return 0;
}