author     David Dawes <dawes@xfree86.org>    2002-01-27 18:23:05 +0000
committer  David Dawes <dawes@xfree86.org>    2002-01-27 18:23:05 +0000
commit     8d9905c9d847522e8ad699f5a5dd36b681f2568c (patch)
tree       36df6ec9e2fcef168472b9af6f64d55249466ad5
parent     8a46f6a0e2eb31c4eaba6d9d7f5e628536ce9607 (diff)
Import of XFree86 4.2.0
-rw-r--r--  bsd-core/Makefile | 3
-rw-r--r--  bsd-core/ati_pcigart.c | 197
-rw-r--r--  bsd-core/drmP.h | 723
-rw-r--r--  bsd-core/drm_agpsupport.c | 326
-rw-r--r--  bsd-core/drm_auth.c | 166
-rw-r--r--  bsd-core/drm_bufs.c | 1092
-rw-r--r--  bsd-core/drm_context.c | 730
-rw-r--r--  bsd-core/drm_dma.c | 605
-rw-r--r--  bsd-core/drm_drawable.c | 50
-rw-r--r--  bsd-core/drm_drv.c | 1160
-rw-r--r--  bsd-core/drm_fops.c | 223
-rw-r--r--  bsd-core/drm_ioctl.c | 237
-rw-r--r--  bsd-core/drm_lock.c | 244
-rw-r--r--  bsd-core/drm_memory.c | 433
-rw-r--r--  bsd-core/drm_os_freebsd.h | 375
-rw-r--r--  bsd-core/drm_scatter.c | 237
-rw-r--r--  bsd-core/drm_sysctl.c | 523
-rw-r--r--  bsd-core/drm_vm.c | 81
-rw-r--r--  bsd-core/mga/Makefile | 25
-rw-r--r--  bsd-core/r128/Makefile | 25
-rw-r--r--  bsd-core/radeon/Makefile | 25
-rw-r--r--  bsd-core/tdfx/Makefile | 20
-rw-r--r--  bsd/Imakefile | 28
-rw-r--r--  bsd/Makefile | 3
-rw-r--r--  bsd/Makefile.bsd | 3
-rw-r--r--  bsd/ati_pcigart.h | 197
-rw-r--r--  bsd/drm.h | 325
-rw-r--r--  bsd/drmP.h | 723
-rw-r--r--  bsd/drm_agpsupport.h | 326
-rw-r--r--  bsd/drm_auth.h | 166
-rw-r--r--  bsd/drm_bufs.h | 1092
-rw-r--r--  bsd/drm_context.h | 730
-rw-r--r--  bsd/drm_dma.h | 605
-rw-r--r--  bsd/drm_drawable.h | 50
-rw-r--r--  bsd/drm_drv.h | 1160
-rw-r--r--  bsd/drm_fops.h | 223
-rw-r--r--  bsd/drm_init.h | 110
-rw-r--r--  bsd/drm_ioctl.h | 237
-rw-r--r--  bsd/drm_lists.h | 236
-rw-r--r--  bsd/drm_lock.h | 244
-rw-r--r--  bsd/drm_memory.h | 433
-rw-r--r--  bsd/drm_os_freebsd.h | 375
-rw-r--r--  bsd/drm_scatter.h | 237
-rw-r--r--  bsd/drm_sysctl.h | 523
-rw-r--r--  bsd/drm_vm.h | 81
-rw-r--r--  bsd/gamma/Makefile | 14
-rw-r--r--  bsd/i810_drm.h | 13
-rw-r--r--  bsd/mga/Makefile | 25
-rw-r--r--  bsd/mga/mga_dma.c | 1544
-rw-r--r--  bsd/mga/mga_drv.c | 752
-rw-r--r--  bsd/mga/mga_drv.h | 1024
-rw-r--r--  bsd/mga/mga_state.c | 1577
-rw-r--r--  bsd/mga_drm.h | 395
-rw-r--r--  bsd/r128/Makefile | 25
-rw-r--r--  bsd/r128_drm.h | 300
-rw-r--r--  bsd/radeon/Makefile | 25
-rw-r--r--  bsd/tdfx/Makefile | 20
-rw-r--r--  bsd/tdfx/tdfx_drv.c | 749
-rw-r--r--  libdrm/xf86drm.c | 4
-rw-r--r--  linux-core/Makefile.kernel | 7
-rw-r--r--  linux-core/ati_pcigart.c | 20
-rw-r--r--  linux-core/drmP.h | 200
-rw-r--r--  linux-core/drm_agpsupport.c | 22
-rw-r--r--  linux-core/drm_bufs.c | 4
-rw-r--r--  linux-core/drm_context.c | 13
-rw-r--r--  linux-core/drm_drv.c | 23
-rw-r--r--  linux-core/drm_fops.c | 30
-rw-r--r--  linux-core/drm_ioctl.c | 4
-rw-r--r--  linux-core/drm_memory.h | 15
-rw-r--r--  linux-core/drm_proc.c | 2
-rw-r--r--  linux-core/drm_scatter.c | 6
-rw-r--r--  linux-core/drm_stub.c | 6
-rw-r--r--  linux-core/drm_vm.c | 133
-rw-r--r--  linux-core/i810_dma.c | 162
-rw-r--r--  linux-core/i810_drm.h | 16
-rw-r--r--  linux-core/i810_drv.c | 11
-rw-r--r--  linux-core/i810_drv.h | 14
-rw-r--r--  linux-core/i830_dma.c | 1418
-rw-r--r--  linux-core/i830_drm.h | 238
-rw-r--r--  linux-core/i830_drv.c | 102
-rw-r--r--  linux-core/i830_drv.h | 213
-rw-r--r--  linux-core/r128_drv.c | 6
-rw-r--r--  linux-core/sis_drv.c | 10
-rw-r--r--  linux/Makefile.kernel | 7
-rw-r--r--  linux/Makefile.linux | 41
-rw-r--r--  linux/ati_pcigart.h | 20
-rw-r--r--  linux/drm.h | 22
-rw-r--r--  linux/drmP.h | 200
-rw-r--r--  linux/drm_agpsupport.h | 22
-rw-r--r--  linux/drm_bufs.h | 4
-rw-r--r--  linux/drm_context.h | 13
-rw-r--r--  linux/drm_drv.h | 23
-rw-r--r--  linux/drm_fops.h | 30
-rw-r--r--  linux/drm_ioctl.h | 4
-rw-r--r--  linux/drm_memory.h | 15
-rw-r--r--  linux/drm_proc.h | 2
-rw-r--r--  linux/drm_scatter.h | 6
-rw-r--r--  linux/drm_stub.h | 6
-rw-r--r--  linux/drm_vm.h | 133
-rw-r--r--  linux/i810_dma.c | 162
-rw-r--r--  linux/i810_drm.h | 16
-rw-r--r--  linux/i810_drv.c | 11
-rw-r--r--  linux/i810_drv.h | 14
-rw-r--r--  linux/i830.h | 116
-rw-r--r--  linux/i830_dma.c | 1418
-rw-r--r--  linux/i830_drm.h | 238
-rw-r--r--  linux/i830_drv.c | 102
-rw-r--r--  linux/i830_drv.h | 213
-rw-r--r--  linux/picker.c | 5
-rw-r--r--  linux/r128_cce.c | 9
-rw-r--r--  linux/r128_drv.c | 6
-rw-r--r--  linux/r128_state.c | 69
-rw-r--r--  linux/radeon_cp.c | 16
-rw-r--r--  linux/radeon_state.c | 2
-rw-r--r--  linux/sis.h | 8
-rw-r--r--  linux/sis_drv.c | 10
-rw-r--r--  linux/sis_ds.c | 2
-rw-r--r--  linux/sis_mm.c | 8
-rw-r--r--  shared-core/drm.h | 22
-rw-r--r--  shared/drm.h | 22
120 files changed, 22135 insertions, 5631 deletions
diff --git a/bsd-core/Makefile b/bsd-core/Makefile
index 61cba175..9c87d963 100644
--- a/bsd-core/Makefile
+++ b/bsd-core/Makefile
@@ -1,5 +1,6 @@
# $FreeBSD$
-SUBDIR = drm tdfx mga gamma
+# i810, i830 & sis are not complete
+SUBDIR = tdfx mga r128 radeon gamma # i810 sis i830
.include <bsd.subdir.mk>
diff --git a/bsd-core/ati_pcigart.c b/bsd-core/ati_pcigart.c
new file mode 100644
index 00000000..8b486c10
--- /dev/null
+++ b/bsd-core/ati_pcigart.c
@@ -0,0 +1,197 @@
+/* ati_pcigart.h -- ATI PCI GART support -*- linux-c -*-
+ * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+#if PAGE_SIZE == 8192
+# define ATI_PCIGART_TABLE_ORDER 2
+# define ATI_PCIGART_TABLE_PAGES (1 << 2)
+#elif PAGE_SIZE == 4096
+# define ATI_PCIGART_TABLE_ORDER 3
+# define ATI_PCIGART_TABLE_PAGES (1 << 3)
+#else
+# error - PAGE_SIZE not 8K or 4K
+#endif
+
+# define ATI_MAX_PCIGART_PAGES 8192 /* 32 MB aperture, 4K pages */
+# define ATI_PCIGART_PAGE_SIZE 4096 /* PCI GART page size */
+
+static unsigned long DRM(ati_alloc_pcigart_table)( void )
+{
+ unsigned long address;
+ struct page *page;
+ int i;
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+ address = __get_free_pages( GFP_KERNEL, ATI_PCIGART_TABLE_ORDER );
+ if ( address == 0UL ) {
+ return 0;
+ }
+
+ page = virt_to_page( address );
+
+ for ( i = 0 ; i <= ATI_PCIGART_TABLE_PAGES ; i++, page++ ) {
+ atomic_inc( &page->count );
+ SetPageReserved( page );
+ }
+
+ DRM_DEBUG( "%s: returning 0x%08lx\n", __FUNCTION__, address );
+ return address;
+}
+
+static void DRM(ati_free_pcigart_table)( unsigned long address )
+{
+ struct page *page;
+ int i;
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+ page = virt_to_page( address );
+
+ for ( i = 0 ; i <= ATI_PCIGART_TABLE_PAGES ; i++, page++ ) {
+ atomic_dec( &page->count );
+ ClearPageReserved( page );
+ }
+
+ free_pages( address, ATI_PCIGART_TABLE_ORDER );
+}
+
+int DRM(ati_pcigart_init)( drm_device_t *dev,
+ unsigned long *addr,
+ dma_addr_t *bus_addr)
+{
+ drm_sg_mem_t *entry = dev->sg;
+ unsigned long address = 0;
+ unsigned long pages;
+ u32 *pci_gart, page_base, bus_address = 0;
+ int i, j, ret = 0;
+
+ if ( !entry ) {
+ DRM_ERROR( "no scatter/gather memory!\n" );
+ goto done;
+ }
+
+ address = DRM(ati_alloc_pcigart_table)();
+ if ( !address ) {
+ DRM_ERROR( "cannot allocate PCI GART page!\n" );
+ goto done;
+ }
+
+ if ( !dev->pdev ) {
+ DRM_ERROR( "PCI device unknown!\n" );
+ goto done;
+ }
+
+ bus_address = pci_map_single(dev->pdev, (void *)address,
+ ATI_PCIGART_TABLE_PAGES * PAGE_SIZE,
+ PCI_DMA_TODEVICE);
+ if (bus_address == 0) {
+ DRM_ERROR( "unable to map PCIGART pages!\n" );
+ DRM(ati_free_pcigart_table)( address );
+ address = 0;
+ goto done;
+ }
+
+ pci_gart = (u32 *)address;
+
+ pages = ( entry->pages <= ATI_MAX_PCIGART_PAGES )
+ ? entry->pages : ATI_MAX_PCIGART_PAGES;
+
+ memset( pci_gart, 0, ATI_MAX_PCIGART_PAGES * sizeof(u32) );
+
+ for ( i = 0 ; i < pages ; i++ ) {
+ /* we need to support large memory configurations */
+ entry->busaddr[i] = pci_map_single(dev->pdev,
+ page_address( entry->pagelist[i] ),
+ PAGE_SIZE,
+ PCI_DMA_TODEVICE);
+ if (entry->busaddr[i] == 0) {
+ DRM_ERROR( "unable to map PCIGART pages!\n" );
+ DRM(ati_pcigart_cleanup)( dev, address, bus_address );
+ address = 0;
+ bus_address = 0;
+ goto done;
+ }
+ page_base = (u32) entry->busaddr[i];
+
+ for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
+ *pci_gart++ = cpu_to_le32( page_base );
+ page_base += ATI_PCIGART_PAGE_SIZE;
+ }
+ }
+
+ ret = 1;
+
+#if defined(__i386__) || defined(__x86_64__)
+ asm volatile ( "wbinvd" ::: "memory" );
+#else
+ mb();
+#endif
+
+done:
+ *addr = address;
+ *bus_addr = bus_address;
+ return ret;
+}
+
+int DRM(ati_pcigart_cleanup)( drm_device_t *dev,
+ unsigned long addr,
+ dma_addr_t bus_addr)
+{
+ drm_sg_mem_t *entry = dev->sg;
+ unsigned long pages;
+ int i;
+
+ /* we need to support large memory configurations */
+ if ( !entry ) {
+ DRM_ERROR( "no scatter/gather memory!\n" );
+ return 0;
+ }
+
+ if ( bus_addr ) {
+ pci_unmap_single(dev->pdev, bus_addr,
+ ATI_PCIGART_TABLE_PAGES * PAGE_SIZE,
+ PCI_DMA_TODEVICE);
+
+ pages = ( entry->pages <= ATI_MAX_PCIGART_PAGES )
+ ? entry->pages : ATI_MAX_PCIGART_PAGES;
+
+ for ( i = 0 ; i < pages ; i++ ) {
+ if ( !entry->busaddr[i] ) break;
+ pci_unmap_single(dev->pdev, entry->busaddr[i],
+ PAGE_SIZE, PCI_DMA_TODEVICE);
+ }
+ }
+
+ if ( addr ) {
+ DRM(ati_free_pcigart_table)( addr );
+ }
+
+ return 1;
+}
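A note on the table-fill arithmetic in DRM(ati_pcigart_init) above: each host page is split into PAGE_SIZE / ATI_PCIGART_PAGE_SIZE consecutive 4 KB GART slots, and each slot holds the page's bus address in little-endian form. The standalone user-space sketch below shows that arithmetic in isolation; fill_gart_entries() is an illustrative stand-in (not driver code), an 8 KB host page and a little-endian host are assumed, so the driver's cpu_to_le32() becomes a no-op.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE             8192   /* host page size assumed for the example */
#define ATI_PCIGART_PAGE_SIZE 4096   /* fixed GART page size, as in the driver */

/* Write the GART slots covering one host page and return the next free slot. */
static uint32_t *fill_gart_entries(uint32_t *gart, uint32_t page_bus_addr)
{
	int j;

	for (j = 0; j < PAGE_SIZE / ATI_PCIGART_PAGE_SIZE; j++) {
		*gart++ = page_bus_addr;            /* one 4K GART slot */
		page_bus_addr += ATI_PCIGART_PAGE_SIZE;
	}
	return gart;
}

int main(void)
{
	uint32_t table[4] = { 0 };
	uint32_t *next = fill_gart_entries(table, 0x10000000u);

	printf("slots written: %ld, first = 0x%08x, second = 0x%08x\n",
	       (long)(next - table), table[0], table[1]);
	return 0;
}

With an 8 KB host page this writes two slots, 0x10000000 and 0x10001000, which is exactly what the inner j-loop of DRM(ati_pcigart_init) does for every mapped scatter/gather page.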
diff --git a/bsd-core/drmP.h b/bsd-core/drmP.h
index 7a1159c7..ead40f86 100644
--- a/bsd-core/drmP.h
+++ b/bsd-core/drmP.h
@@ -1,8 +1,8 @@
-/* drmP.h -- Private header for Direct Rendering Manager -*- c -*-
+/* drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
- * Revised: Tue Oct 12 08:51:07 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -11,225 +11,119 @@
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmP.h,v 1.58 1999/08/30 13:05:00 faith Exp $
- * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/bsd/drm/kernel/drmP.h,v 1.3 2001/03/06 16:45:26 dawes Exp $
- *
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
*/
#ifndef _DRM_P_H_
#define _DRM_P_H_
-#ifdef _KERNEL
-#include <sys/param.h>
-#include <sys/queue.h>
-#include <sys/malloc.h>
-#include <sys/kernel.h>
-#include <sys/module.h>
-#include <sys/systm.h>
-#include <sys/conf.h>
-#include <sys/stat.h>
-#include <sys/proc.h>
-#include <sys/lock.h>
-#include <sys/fcntl.h>
-#include <sys/uio.h>
-#include <sys/filio.h>
-#include <sys/sysctl.h>
-#include <sys/select.h>
-#include <sys/bus.h>
-#if __FreeBSD_version >= 400005
-#include <sys/taskqueue.h>
-#endif
+#if defined(_KERNEL) || defined(__KERNEL__)
-#if __FreeBSD_version >= 400006
-#define DRM_AGP
+/* DRM template customization defaults
+ */
+#ifndef __HAVE_AGP
+#define __HAVE_AGP 0
#endif
-
-#ifdef DRM_AGP
-#include <pci/agpvar.h>
+#ifndef __HAVE_MTRR
+#define __HAVE_MTRR 0
+#endif
+#ifndef __HAVE_CTX_BITMAP
+#define __HAVE_CTX_BITMAP 0
+#endif
+#ifndef __HAVE_DMA
+#define __HAVE_DMA 0
+#endif
+#ifndef __HAVE_DMA_IRQ
+#define __HAVE_DMA_IRQ 0
+#endif
+#ifndef __HAVE_DMA_WAITLIST
+#define __HAVE_DMA_WAITLIST 0
+#endif
+#ifndef __HAVE_DMA_FREELIST
+#define __HAVE_DMA_FREELIST 0
+#endif
+#ifndef __HAVE_DMA_HISTOGRAM
+#define __HAVE_DMA_HISTOGRAM 0
#endif
-#include "drm.h"
+#define DRM_DEBUG_CODE 0	  /* Include debugging code (if > 1, then
+				     also include looping detection). */
-typedef u_int32_t atomic_t;
-typedef u_int32_t cycles_t;
-typedef u_int32_t spinlock_t;
-#define atomic_set(p, v) (*(p) = (v))
-#define atomic_read(p) (*(p))
-#define atomic_inc(p) atomic_add_int(p, 1)
-#define atomic_dec(p) atomic_subtract_int(p, 1)
-#define atomic_add(n, p) atomic_add_int(p, n)
-#define atomic_sub(n, p) atomic_subtract_int(p, n)
-
-/* The version number here is a guess */
-#if __FreeBSD_version >= 500010
-#define callout_init(a) callout_init(a, 0)
-#endif
+typedef struct drm_device drm_device_t;
+typedef struct drm_file drm_file_t;
-/* Fake this */
-static __inline u_int32_t
-test_and_set_bit(int b, volatile u_int32_t *p)
-{
- int s = splhigh();
- u_int32_t m = 1<<b;
- u_int32_t r = *p & m;
- *p |= m;
- splx(s);
- return r;
-}
-
-static __inline void
-clear_bit(int b, volatile u_int32_t *p)
-{
- atomic_clear_int(p + (b >> 5), 1 << (b & 0x1f));
-}
-
-static __inline void
-set_bit(int b, volatile u_int32_t *p)
-{
- atomic_set_int(p + (b >> 5), 1 << (b & 0x1f));
-}
-
-static __inline int
-test_bit(int b, volatile u_int32_t *p)
-{
- return p[b >> 5] & (1 << (b & 0x1f));
-}
-
-static __inline int
-find_first_zero_bit(volatile u_int32_t *p, int max)
-{
- int b;
-
- for (b = 0; b < max; b += 32) {
- if (p[b >> 5] != ~0) {
- for (;;) {
- if ((p[b >> 5] & (1 << (b & 0x1f))) == 0)
- return b;
- b++;
- }
- }
- }
- return max;
-}
-
-#define spldrm() spltty()
-
-#define memset(p, v, s) bzero(p, s)
-
-/*
- * Fake out the module macros for versions of FreeBSD where they don't
- * exist.
- */
-#if __FreeBSD_version < 400002
+/* There's undoubtedly more of this file to go into these OS-dependent ones. */
-#define MODULE_VERSION(a,b) struct __hack
-#define MODULE_DEPEND(a,b,c,d,e) struct __hack
+#include "drm_os_freebsd.h"
-#endif
+#include "drm.h"
-#define DRM_DEBUG_CODE 0 /* Include debugging code (if > 1, then
- also include looping detection. */
-#define DRM_DMA_HISTOGRAM 1 /* Make histogram of DMA latency. */
+/* Begin the DRM... */
#define DRM_HASH_SIZE 16 /* Size of key hash table */
#define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */
#define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */
#define DRM_LOOPING_LIMIT 5000000
#define DRM_BSZ 1024 /* Buffer size for /dev/drm? output */
-#define DRM_TIME_SLICE (hz/20) /* Time slice for GLXContexts */
#define DRM_LOCK_SLICE 1 /* Time slice for lock, in jiffies */
#define DRM_FLAG_DEBUG 0x01
#define DRM_FLAG_NOCTX 0x02
-#define DRM_MEM_DMA 0
-#define DRM_MEM_SAREA 1
-#define DRM_MEM_DRIVER 2
-#define DRM_MEM_MAGIC 3
-#define DRM_MEM_IOCTLS 4
-#define DRM_MEM_MAPS 5
-#define DRM_MEM_VMAS 6
-#define DRM_MEM_BUFS 7
-#define DRM_MEM_SEGS 8
-#define DRM_MEM_PAGES 9
-#define DRM_MEM_FILES 10
-#define DRM_MEM_QUEUES 11
-#define DRM_MEM_CMDS 12
-#define DRM_MEM_MAPPINGS 13
-#define DRM_MEM_BUFLISTS 14
+#define DRM_MEM_DMA 0
+#define DRM_MEM_SAREA 1
+#define DRM_MEM_DRIVER 2
+#define DRM_MEM_MAGIC 3
+#define DRM_MEM_IOCTLS 4
+#define DRM_MEM_MAPS 5
+#define DRM_MEM_VMAS 6
+#define DRM_MEM_BUFS 7
+#define DRM_MEM_SEGS 8
+#define DRM_MEM_PAGES 9
+#define DRM_MEM_FILES 10
+#define DRM_MEM_QUEUES 11
+#define DRM_MEM_CMDS 12
+#define DRM_MEM_MAPPINGS 13
+#define DRM_MEM_BUFLISTS 14
#define DRM_MEM_AGPLISTS 15
#define DRM_MEM_TOTALAGP 16
#define DRM_MEM_BOUNDAGP 17
#define DRM_MEM_CTXBITMAP 18
+#define DRM_MEM_STUB 19
+#define DRM_MEM_SGLISTS 20
#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
/* Backward compatibility section */
+ /* _PAGE_WT changed to _PAGE_PWT in 2.2.6 */
#ifndef _PAGE_PWT
- /* The name of _PAGE_WT was changed to
- _PAGE_PWT in Linux 2.2.6 */
#define _PAGE_PWT _PAGE_WT
#endif
-#define __drm_dummy_lock(lock) (*(__volatile__ unsigned int *)lock)
-#define _DRM_CAS(lock,old,new,__ret) \
- do { \
- int __dummy; /* Can't mark eax as clobbered */ \
- __asm__ __volatile__( \
- "lock ; cmpxchg %4,%1\n\t" \
- "setnz %0" \
- : "=d" (__ret), \
- "=m" (__drm_dummy_lock(lock)), \
- "=a" (__dummy) \
- : "2" (old), \
- "r" (new)); \
- } while (0)
-
+ /* Mapping helper macros */
+#define DRM_IOREMAP(map) \
+ (map)->handle = DRM(ioremap)( (map)->offset, (map)->size )
-
- /* Macros to make printk easier */
-#define DRM_ERROR(fmt, arg...) \
- printf("error: " "[" DRM_NAME ":" __FUNCTION__ "] *ERROR* " fmt , ##arg)
-#define DRM_MEM_ERROR(area, fmt, arg...) \
- printf("error: " "[" DRM_NAME ":" __FUNCTION__ ":%s] *ERROR* " fmt , \
- drm_mem_stats[area].name , ##arg)
-#define DRM_INFO(fmt, arg...) printf("info: " "[" DRM_NAME "] " fmt , ##arg)
-
-#if DRM_DEBUG_CODE
-#define DRM_DEBUG(fmt, arg...) \
- do { \
- if (drm_flags&DRM_FLAG_DEBUG) \
- printf("[" DRM_NAME ":" __FUNCTION__ "] " fmt , \
- ##arg); \
+#define DRM_IOREMAPFREE(map) \
+ do { \
+ if ( (map)->handle && (map)->size ) \
+ DRM(ioremapfree)( (map)->handle, (map)->size ); \
} while (0)
-#else
-#define DRM_DEBUG(fmt, arg...) do { } while (0)
-#endif
-
-#define DRM_PROC_LIMIT (PAGE_SIZE-80)
-
-#define DRM_SYSCTL_PRINT(fmt, arg...) \
- snprintf(buf, sizeof(buf), fmt, ##arg); \
- error = SYSCTL_OUT(req, buf, strlen(buf)); \
- if (error) return error;
-
-#define DRM_SYSCTL_PRINT_RET(ret, fmt, arg...) \
- snprintf(buf, sizeof(buf), fmt, ##arg); \
- error = SYSCTL_OUT(req, buf, strlen(buf)); \
- if (error) { ret; return error; }
/* Internal types and structures */
#define DRM_ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
@@ -240,15 +134,25 @@ find_first_zero_bit(volatile u_int32_t *p, int max)
#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
+#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do { \
+ (_map) = (_dev)->context_sareas[_ctx]; \
+} while(0)
+
+
+typedef struct drm_pci_list {
+ u16 vendor;
+ u16 device;
+} drm_pci_list_t;
+
typedef struct drm_ioctl_desc {
- d_ioctl_t *func;
+ d_ioctl_t *func;
int auth_needed;
int root_only;
} drm_ioctl_desc_t;
typedef struct drm_devstate {
pid_t owner; /* X server pid holding x_lock */
-
+
} drm_devstate_t;
typedef struct drm_magic_entry {
@@ -258,8 +162,8 @@ typedef struct drm_magic_entry {
} drm_magic_entry_t;
typedef struct drm_magic_head {
- struct drm_magic_entry *head;
- struct drm_magic_entry *tail;
+ struct drm_magic_entry *head;
+ struct drm_magic_entry *tail;
} drm_magic_head_t;
typedef struct drm_vma_entry {
@@ -279,7 +183,7 @@ typedef struct drm_buf {
struct drm_buf *next; /* Kernel-only: used for free list */
__volatile__ int waiting; /* On kernel DMA queue */
__volatile__ int pending; /* On hardware DMA queue */
- int dma_wait; /* Processes waiting */
+ wait_queue_head_t dma_wait; /* Processes waiting */
pid_t pid; /* PID of holding process */
int context; /* Kernel queue for this buffer */
int while_locked;/* Dispatch this buffer while locked */
@@ -292,15 +196,15 @@ typedef struct drm_buf {
DRM_LIST_RECLAIM = 5
} list; /* Which list we're on */
- void *dev_private;
- int dev_priv_size;
-
#if DRM_DMA_HISTOGRAM
- struct timespec time_queued; /* Queued to kernel DMA queue */
- struct timespec time_dispatched; /* Dispatched to hardware */
- struct timespec time_completed; /* Completed by hardware */
- struct timespec time_freed; /* Back on freelist */
+ cycles_t time_queued; /* Queued to kernel DMA queue */
+ cycles_t time_dispatched; /* Dispatched to hardware */
+ cycles_t time_completed; /* Completed by hardware */
+ cycles_t time_freed; /* Back on freelist */
#endif
+
+	int		  dev_priv_size;	 /* Size of buffer private storage */
+ void *dev_private; /* Per-buffer private storage */
} drm_buf_t;
#if DRM_DMA_HISTOGRAM
@@ -309,14 +213,14 @@ typedef struct drm_buf {
#define DRM_DMA_HISTOGRAM_NEXT(current) ((current)*10)
typedef struct drm_histogram {
atomic_t total;
-
+
atomic_t queued_to_dispatched[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t dispatched_to_completed[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t completed_to_freed[DRM_DMA_HISTOGRAM_SLOTS];
-
+
atomic_t queued_to_completed[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t queued_to_freed[DRM_DMA_HISTOGRAM_SLOTS];
-
+
atomic_t dma[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t schedule[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t ctx[DRM_DMA_HISTOGRAM_SLOTS];
@@ -332,20 +236,20 @@ typedef struct drm_waitlist {
drm_buf_t **rp; /* Read pointer */
drm_buf_t **wp; /* Write pointer */
drm_buf_t **end; /* End pointer */
- spinlock_t read_lock;
- spinlock_t write_lock;
+ DRM_OS_SPINTYPE read_lock;
+ DRM_OS_SPINTYPE write_lock;
} drm_waitlist_t;
typedef struct drm_freelist {
int initialized; /* Freelist in use */
atomic_t count; /* Number of free buffers */
drm_buf_t *next; /* End pointer */
-
- int waiting; /* Processes waiting on free bufs */
+
+ wait_queue_head_t waiting; /* Processes waiting on free bufs */
int low_mark; /* Low water mark */
int high_mark; /* High water mark */
atomic_t wfh; /* If waiting for high mark */
- struct simplelock lock; /* hope this doesn't need to be linux compatible */
+ DRM_OS_SPINTYPE lock;
} drm_freelist_t;
typedef struct drm_buf_entry {
@@ -365,7 +269,7 @@ typedef struct drm_hw_lock {
} drm_hw_lock_t;
typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t;
-typedef struct drm_file {
+struct drm_file {
TAILQ_ENTRY(drm_file) link;
int authenticated;
int minor;
@@ -375,38 +279,40 @@ typedef struct drm_file {
drm_magic_t magic;
unsigned long ioctl_count;
struct drm_device *devXX;
-} drm_file_t;
-
+};
typedef struct drm_queue {
atomic_t use_count; /* Outstanding uses (+1) */
atomic_t finalization; /* Finalization in progress */
atomic_t block_count; /* Count of processes waiting */
atomic_t block_read; /* Queue blocked for reads */
- int read_queue; /* Processes waiting on block_read */
+ wait_queue_head_t read_queue; /* Processes waiting on block_read */
atomic_t block_write; /* Queue blocked for writes */
- int write_queue; /* Processes waiting on block_write */
+ wait_queue_head_t write_queue; /* Processes waiting on block_write */
+#if 1
atomic_t total_queued; /* Total queued statistic */
atomic_t total_flushed;/* Total flushes statistic */
atomic_t total_locks; /* Total locks statistics */
+#endif
drm_ctx_flags_t flags; /* Context preserving and 2D-only */
drm_waitlist_t waitlist; /* Pending buffers */
- int flush_queue; /* Processes waiting until flush */
+ wait_queue_head_t flush_queue; /* Processes waiting until flush */
} drm_queue_t;
typedef struct drm_lock_data {
drm_hw_lock_t *hw_lock; /* Hardware lock */
pid_t pid; /* PID of lock holder (0=kernel) */
- int lock_queue; /* Queue of blocked processes */
+ wait_queue_head_t lock_queue; /* Queue of blocked processes */
unsigned long lock_time; /* Time of last lock in jiffies */
} drm_lock_data_t;
typedef struct drm_device_dma {
+#if 0
/* Performance Counters */
atomic_t total_prio; /* Total DRM_DMA_PRIORITY */
atomic_t total_bytes; /* Total bytes DMA'd */
atomic_t total_dmas; /* Total DMA buffers dispatched */
-
+
atomic_t total_missed_dma; /* Missed drm_do_dma */
atomic_t total_missed_lock; /* Missed lock in drm_do_dma */
atomic_t total_missed_free; /* Missed drm_free_this_buffer */
@@ -415,27 +321,28 @@ typedef struct drm_device_dma {
atomic_t total_tried; /* Tried next_buffer */
atomic_t total_hit; /* Sent next_buffer */
atomic_t total_lost; /* Lost interrupt */
+#endif
drm_buf_entry_t bufs[DRM_MAX_ORDER+1];
int buf_count;
drm_buf_t **buflist; /* Vector of pointers info bufs */
- int seg_count;
+ int seg_count;
int page_count;
- vm_offset_t *pagelist;
+ unsigned long *pagelist;
unsigned long byte_count;
enum {
- _DRM_DMA_USE_AGP = 0x01
+ _DRM_DMA_USE_AGP = 0x01,
+ _DRM_DMA_USE_SG = 0x02
} flags;
/* DMA support */
drm_buf_t *this_buffer; /* Buffer being sent */
drm_buf_t *next_buffer; /* Selected buffer to send */
drm_queue_t *next_queue; /* Queue from which buffer selected*/
- int waiting; /* Processes waiting on free bufs */
+ wait_queue_head_t waiting; /* Processes waiting on free bufs */
} drm_device_dma_t;
-#ifdef DRM_AGP
-
+#if __REALLY_HAVE_AGP
typedef struct drm_agp_mem {
void *handle;
unsigned long bound; /* address */
@@ -454,27 +361,45 @@ typedef struct drm_agp_head {
int acquired;
unsigned long base;
int agp_mtrr;
+ int cant_use_aperture;
+ unsigned long page_mask;
} drm_agp_head_t;
-
#endif
-typedef struct drm_device {
+typedef struct drm_sg_mem {
+ unsigned long handle;
+ void *virtual;
+ int pages;
+ struct page **pagelist;
+} drm_sg_mem_t;
+
+typedef struct drm_sigdata {
+ int context;
+ drm_hw_lock_t *lock;
+} drm_sigdata_t;
+
+typedef TAILQ_HEAD(drm_map_list, drm_map_list_entry) drm_map_list_t;
+typedef struct drm_map_list_entry {
+ TAILQ_ENTRY(drm_map_list_entry) link;
+ drm_map_t *map;
+} drm_map_list_entry_t;
+
+struct drm_device {
const char *name; /* Simple driver name */
char *unique; /* Unique identifier: e.g., busid */
int unique_len; /* Length of unique field */
device_t device; /* Device instance from newbus */
dev_t devnode; /* Device number for mknod */
char *devname; /* For /proc/interrupts */
-
+
int blocked; /* Blocked due to VC switch? */
int flags; /* Flags to open(2) */
int writable; /* Opened with FWRITE */
struct proc_dir_entry *root; /* Root for this device's entries */
/* Locks */
- struct simplelock count_lock; /* For inuse, open_count, buf_use */
- struct lock dev_lock; /* For others */
-
+ DRM_OS_SPINTYPE count_lock; /* For inuse, open_count, buf_use */
+ struct lock dev_lock; /* For others */
/* Usage Counters */
int open_count; /* Outstanding files open */
atomic_t ioctl_count; /* Outstanding IOCTLs pending */
@@ -482,26 +407,22 @@ typedef struct drm_device {
int buf_use; /* Buffers in use -- cannot alloc */
atomic_t buf_alloc; /* Buffer allocation in progress */
- /* Performance Counters */
- atomic_t total_open;
- atomic_t total_close;
- atomic_t total_ioctl;
- atomic_t total_irq; /* Total interruptions */
- atomic_t total_ctx; /* Total context switches */
-
- atomic_t total_locks;
- atomic_t total_unlocks;
- atomic_t total_contends;
- atomic_t total_sleeps;
+ /* Performance counters */
+ unsigned long counters;
+ drm_stat_type_t types[15];
+ atomic_t counts[15];
/* Authentication */
drm_file_list_t files;
drm_magic_head_t magiclist[DRM_HASH_SIZE];
/* Memory management */
- drm_map_t **maplist; /* Vector of pointers to regions */
+ drm_map_list_t *maplist; /* Linked list of regions */
int map_count; /* Number of mappable regions */
+ drm_map_t **context_sareas;
+ int max_context;
+
drm_vma_entry_t *vmalist; /* List of vmas (for debugging) */
drm_lock_data_t lock; /* Information on hardware lock */
@@ -513,223 +434,199 @@ typedef struct drm_device {
drm_device_dma_t *dma; /* Optional pointer for DMA support */
/* Context support */
- struct resource *irq; /* Interrupt used by board */
+			DRM_DEBUG("map collision: add(0x%lx-0x%lx), current(0x%lx-0x%lx)\n",
+ struct resource *irqr; /* Resource for interrupt used by board */
void *irqh; /* Handle from bus_setup_intr */
- __volatile__ long context_flag; /* Context swapping flag */
- __volatile__ long interrupt_flag;/* Interruption handler flag */
- __volatile__ long dma_flag; /* DMA dispatch flag */
- struct callout timer; /* Timer for delaying ctx switch */
- int context_wait; /* Processes waiting on ctx switch */
+ __volatile__ long context_flag; /* Context swapping flag */
+ __volatile__ long interrupt_flag; /* Interruption handler flag */
+ __volatile__ long dma_flag; /* DMA dispatch flag */
+ struct callout timer; /* Timer for delaying ctx switch */
+ wait_queue_head_t context_wait; /* Processes waiting on ctx switch */
int last_checked; /* Last context checked for DMA */
int last_context; /* Last current context */
- int last_switch; /* Time at last context switch */
+ unsigned long last_switch; /* jiffies at last context switch */
#if __FreeBSD_version >= 400005
- struct task task;
+ struct task task;
#endif
- struct timespec ctx_start;
- struct timespec lck_start;
-#if DRM_DMA_HISTOGRAM
+ cycles_t ctx_start;
+ cycles_t lck_start;
+#if __HAVE_DMA_HISTOGRAM
drm_histogram_t histo;
#endif
-
+
/* Callback to X server for context switch
and for heavy-handed reset. */
char buf[DRM_BSZ]; /* Output buffer */
char *buf_rp; /* Read pointer */
char *buf_wp; /* Write pointer */
char *buf_end; /* End pointer */
- struct sigio *buf_sigio; /* Processes waiting for SIGIO */
+ struct sigio *buf_sigio; /* Processes waiting for SIGIO */
struct selinfo buf_sel; /* Workspace for select/poll */
- int buf_readers; /* Processes waiting to read */
- int buf_writers; /* Processes waiting to ctx switch */
- int buf_selecting; /* True if poll sleeper */
+ int buf_selecting;/* True if poll sleeper */
+ wait_queue_head_t buf_readers; /* Processes waiting to read */
+ wait_queue_head_t buf_writers; /* Processes waiting to ctx switch */
/* Sysctl support */
struct drm_sysctl_info *sysctl;
-#ifdef DRM_AGP
+#if __REALLY_HAVE_AGP
drm_agp_head_t *agp;
#endif
- u_int32_t *ctx_bitmap;
+ struct pci_dev *pdev;
+#ifdef __alpha__
+#if LINUX_VERSION_CODE < 0x020403
+ struct pci_controler *hose;
+#else
+ struct pci_controller *hose;
+#endif
+#endif
+ drm_sg_mem_t *sg; /* Scatter gather memory */
+ unsigned long *ctx_bitmap;
void *dev_private;
-} drm_device_t;
-
-
- /* Internal function definitions */
-
- /* Misc. support (init.c) */
-extern int drm_flags;
-extern void drm_parse_options(char *s);
-
-
- /* Device support (fops.c) */
-extern drm_file_t *drm_find_file_by_proc(drm_device_t *dev, struct proc *p);
-extern int drm_open_helper(dev_t kdev, int flags, int fmt, struct proc *p,
- drm_device_t *dev);
-extern d_close_t drm_close;
-extern d_read_t drm_read;
-extern d_write_t drm_write;
-extern d_poll_t drm_poll;
-extern int drm_fsetown(dev_t kdev, u_long cmd, caddr_t data,
- int flags, struct proc *p);
-extern int drm_fgetown(dev_t kdev, u_long cmd, caddr_t data,
- int flags, struct proc *p);
-extern int drm_write_string(drm_device_t *dev, const char *s);
-
-#if 0
- /* Mapping support (vm.c) */
-extern unsigned long drm_vm_nopage(struct vm_area_struct *vma,
- unsigned long address,
- int write_access);
-extern unsigned long drm_vm_shm_nopage(struct vm_area_struct *vma,
- unsigned long address,
- int write_access);
-extern unsigned long drm_vm_dma_nopage(struct vm_area_struct *vma,
- unsigned long address,
- int write_access);
-extern void drm_vm_open(struct vm_area_struct *vma);
-extern void drm_vm_close(struct vm_area_struct *vma);
-extern int drm_mmap_dma(struct file *filp,
- struct vm_area_struct *vma);
+ drm_sigdata_t sigdata; /* For block_all_signals */
+ sigset_t sigmask;
+};
+
+extern int DRM(flags);
+extern void DRM(parse_options)( char *s );
+extern int DRM(cpu_valid)( void );
+
+ /* Authentication (drm_auth.h) */
+extern int DRM(add_magic)(drm_device_t *dev, drm_file_t *priv,
+ drm_magic_t magic);
+extern int DRM(remove_magic)(drm_device_t *dev, drm_magic_t magic);
+
+ /* Driver support (drm_drv.h) */
+extern int DRM(version)( DRM_OS_IOCTL );
+extern int DRM(write_string)(drm_device_t *dev, const char *s);
+
+ /* Memory management support (drm_memory.h) */
+extern void DRM(mem_init)(void);
+extern void *DRM(alloc)(size_t size, int area);
+extern void *DRM(realloc)(void *oldpt, size_t oldsize, size_t size,
+ int area);
+extern char *DRM(strdup)(const char *s, int area);
+extern void DRM(strfree)(char *s, int area);
+extern void DRM(free)(void *pt, size_t size, int area);
+extern unsigned long DRM(alloc_pages)(int order, int area);
+extern void DRM(free_pages)(unsigned long address, int order,
+ int area);
+extern void *DRM(ioremap)(unsigned long offset, unsigned long size);
+extern void DRM(ioremapfree)(void *pt, unsigned long size);
+
+#if __REALLY_HAVE_AGP
+extern agp_memory *DRM(alloc_agp)(int pages, u32 type);
+extern int DRM(free_agp)(agp_memory *handle, int pages);
+extern int DRM(bind_agp)(agp_memory *handle, unsigned int start);
+extern int DRM(unbind_agp)(agp_memory *handle);
#endif
-extern d_mmap_t drm_mmap;
- /* Proc support (proc.c) */
-extern int drm_sysctl_init(drm_device_t *dev);
-extern int drm_sysctl_cleanup(drm_device_t *dev);
+extern int DRM(context_switch)(drm_device_t *dev, int old, int new);
+extern int DRM(context_switch_complete)(drm_device_t *dev, int new);
- /* Memory management support (memory.c) */
-extern void drm_mem_init(void);
+#if __HAVE_CTX_BITMAP
+extern int DRM(ctxbitmap_init)( drm_device_t *dev );
+extern void DRM(ctxbitmap_cleanup)( drm_device_t *dev );
+extern void DRM(ctxbitmap_free)( drm_device_t *dev, int ctx_handle );
+extern int DRM(ctxbitmap_next)( drm_device_t *dev );
+#endif
-#if __FreeBSD_version < 411000
-#define DRM_SYSCTL_HANDLER_ARGS SYSCTL_HANDLER_ARGS
-#else
-#define DRM_SYSCTL_HANDLER_ARGS (SYSCTL_HANDLER_ARGS)
+ /* Locking IOCTL support (drm_lock.h) */
+extern int DRM(lock_take)(__volatile__ unsigned int *lock,
+ unsigned int context);
+extern int DRM(lock_transfer)(drm_device_t *dev,
+ __volatile__ unsigned int *lock,
+ unsigned int context);
+extern int DRM(lock_free)(drm_device_t *dev,
+ __volatile__ unsigned int *lock,
+ unsigned int context);
+extern int DRM(flush_unblock)(drm_device_t *dev, int context,
+ drm_lock_flags_t flags);
+extern int DRM(flush_block_and_flush)(drm_device_t *dev, int context,
+ drm_lock_flags_t flags);
+extern int DRM(notifier)(void *priv);
+
+ /* Buffer management support (drm_bufs.h) */
+extern int DRM(order)( unsigned long size );
+
+#if __HAVE_DMA
+ /* DMA support (drm_dma.h) */
+extern int DRM(dma_setup)(drm_device_t *dev);
+extern void DRM(dma_takedown)(drm_device_t *dev);
+extern void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf);
+extern void DRM(reclaim_buffers)(drm_device_t *dev, pid_t pid);
+#if __HAVE_OLD_DMA
+/* GH: This is a dirty hack for now...
+ */
+extern void DRM(clear_next_buffer)(drm_device_t *dev);
+extern int DRM(select_queue)(drm_device_t *dev,
+ void (*wrapper)(unsigned long));
+extern int DRM(dma_enqueue)(drm_device_t *dev, drm_dma_t *dma);
+extern int DRM(dma_get_buffers)(drm_device_t *dev, drm_dma_t *dma);
+#endif
+#if __HAVE_DMA_IRQ
+extern int DRM(irq_install)( drm_device_t *dev, int irq );
+extern int DRM(irq_uninstall)( drm_device_t *dev );
+extern void DRM(dma_service)( DRM_OS_IRQ_ARGS );
+#if __HAVE_DMA_IRQ_BH
+extern void DRM(dma_immediate_bh)( DRM_OS_TASKQUEUE_ARGS );
#endif
-extern int drm_mem_info DRM_SYSCTL_HANDLER_ARGS;
-extern void *drm_alloc(size_t size, int area);
-extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size,
- int area);
-extern char *drm_strdup(const char *s, int area);
-extern void drm_strfree(char *s, int area);
-extern void drm_free(void *pt, size_t size, int area);
-extern unsigned long drm_alloc_pages(int order, int area);
-extern void drm_free_pages(unsigned long address, int order,
- int area);
-extern void *drm_ioremap(unsigned long offset, unsigned long size);
-extern void drm_ioremapfree(void *pt, unsigned long size);
-
-#ifdef DRM_AGP
-extern void *drm_alloc_agp(int pages, u_int32_t type);
-extern int drm_free_agp(void *handle, int pages);
-extern int drm_bind_agp(void *handle, unsigned int start);
-extern int drm_unbind_agp(void *handle);
#endif
-
- /* Buffer management support (bufs.c) */
-extern int drm_order(unsigned long size);
-extern d_ioctl_t drm_addmap;
-extern d_ioctl_t drm_addbufs;
-extern d_ioctl_t drm_infobufs;
-extern d_ioctl_t drm_markbufs;
-extern d_ioctl_t drm_freebufs;
-extern d_ioctl_t drm_mapbufs;
-
-
- /* Buffer list management support (lists.c) */
-extern int drm_waitlist_create(drm_waitlist_t *bl, int count);
-extern int drm_waitlist_destroy(drm_waitlist_t *bl);
-extern int drm_waitlist_put(drm_waitlist_t *bl, drm_buf_t *buf);
-extern drm_buf_t *drm_waitlist_get(drm_waitlist_t *bl);
-
-extern int drm_freelist_create(drm_freelist_t *bl, int count);
-extern int drm_freelist_destroy(drm_freelist_t *bl);
-extern int drm_freelist_put(drm_device_t *dev, drm_freelist_t *bl,
- drm_buf_t *buf);
-extern drm_buf_t *drm_freelist_get(drm_freelist_t *bl, int block);
-
- /* DMA support (gen_dma.c) */
-extern void drm_dma_setup(drm_device_t *dev);
-extern void drm_dma_takedown(drm_device_t *dev);
-extern void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf);
-extern void drm_reclaim_buffers(drm_device_t *dev, pid_t pid);
-extern int drm_context_switch(drm_device_t *dev, int old, int new);
-extern int drm_context_switch_complete(drm_device_t *dev, int new);
-extern void drm_wakeup(drm_device_t *dev, drm_buf_t *buf);
-extern void drm_clear_next_buffer(drm_device_t *dev);
-extern int drm_select_queue(drm_device_t *dev,
- void (*wrapper)(void *));
-extern int drm_dma_enqueue(drm_device_t *dev, drm_dma_t *dma);
-extern int drm_dma_get_buffers(drm_device_t *dev, drm_dma_t *dma);
#if DRM_DMA_HISTOGRAM
-extern int drm_histogram_slot(struct timespec *ts);
-extern void drm_histogram_compute(drm_device_t *dev, drm_buf_t *buf);
+extern int DRM(histogram_slot)(unsigned long count);
+extern void DRM(histogram_compute)(drm_device_t *dev, drm_buf_t *buf);
#endif
+ /* Buffer list support (drm_lists.h) */
+#if __HAVE_DMA_WAITLIST
+extern int DRM(waitlist_create)(drm_waitlist_t *bl, int count);
+extern int DRM(waitlist_destroy)(drm_waitlist_t *bl);
+extern int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf);
+extern drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl);
+#endif
+#if __HAVE_DMA_FREELIST
+extern int DRM(freelist_create)(drm_freelist_t *bl, int count);
+extern int DRM(freelist_destroy)(drm_freelist_t *bl);
+extern int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl,
+ drm_buf_t *buf);
+extern drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block);
+#endif
+#endif /* __HAVE_DMA */
+
+#if __REALLY_HAVE_AGP
+ /* AGP/GART support (drm_agpsupport.h) */
+extern drm_agp_head_t *DRM(agp_init)(void);
+extern void DRM(agp_uninit)(void);
+extern void DRM(agp_do_release)(void);
+extern agp_memory *DRM(agp_allocate_memory)(size_t pages, u32 type);
+extern int DRM(agp_free_memory)(agp_memory *handle);
+extern int DRM(agp_bind_memory)(agp_memory *handle, off_t start);
+extern int DRM(agp_unbind_memory)(agp_memory *handle);
+#endif
- /* Misc. IOCTL support (ioctl.c) */
-extern d_ioctl_t drm_irq_busid;
-extern d_ioctl_t drm_getunique;
-extern d_ioctl_t drm_setunique;
-
-
- /* Context IOCTL support (context.c) */
-extern d_ioctl_t drm_resctx;
-extern d_ioctl_t drm_addctx;
-extern d_ioctl_t drm_modctx;
-extern d_ioctl_t drm_getctx;
-extern d_ioctl_t drm_switchctx;
-extern d_ioctl_t drm_newctx;
-extern d_ioctl_t drm_rmctx;
-
-
- /* Drawable IOCTL support (drawable.c) */
-extern d_ioctl_t drm_adddraw;
-extern d_ioctl_t drm_rmdraw;
-
-
- /* Authentication IOCTL support (auth.c) */
-extern int drm_add_magic(drm_device_t *dev, drm_file_t *priv,
- drm_magic_t magic);
-extern int drm_remove_magic(drm_device_t *dev, drm_magic_t magic);
-extern d_ioctl_t drm_getmagic;
-extern d_ioctl_t drm_authmagic;
-
-
- /* Locking IOCTL support (lock.c) */
-extern d_ioctl_t drm_block;
-extern d_ioctl_t drm_unblock;
-extern int drm_lock_take(__volatile__ unsigned int *lock,
- unsigned int context);
-extern int drm_lock_transfer(drm_device_t *dev,
- __volatile__ unsigned int *lock,
- unsigned int context);
-extern int drm_lock_free(drm_device_t *dev,
- __volatile__ unsigned int *lock,
- unsigned int context);
-extern d_ioctl_t drm_finish;
-extern int drm_flush_unblock(drm_device_t *dev, int context,
- drm_lock_flags_t flags);
-extern int drm_flush_block_and_flush(drm_device_t *dev, int context,
- drm_lock_flags_t flags);
-
- /* Context Bitmap support (ctxbitmap.c) */
-extern int drm_ctxbitmap_init(drm_device_t *dev);
-extern void drm_ctxbitmap_cleanup(drm_device_t *dev);
-extern int drm_ctxbitmap_next(drm_device_t *dev);
-extern void drm_ctxbitmap_free(drm_device_t *dev, int ctx_handle);
-
-#ifdef DRM_AGP
- /* AGP/GART support (agpsupport.c) */
-extern drm_agp_head_t *drm_agp_init(void);
-extern d_ioctl_t drm_agp_acquire;
-extern d_ioctl_t drm_agp_release;
-extern d_ioctl_t drm_agp_enable;
-extern d_ioctl_t drm_agp_info;
-extern d_ioctl_t drm_agp_alloc;
-extern d_ioctl_t drm_agp_free;
-extern d_ioctl_t drm_agp_unbind;
-extern d_ioctl_t drm_agp_bind;
+ /* Proc support (drm_proc.h) */
+extern struct proc_dir_entry *DRM(proc_init)(drm_device_t *dev,
+ int minor,
+ struct proc_dir_entry *root,
+ struct proc_dir_entry **dev_root);
+extern int DRM(proc_cleanup)(int minor,
+ struct proc_dir_entry *root,
+ struct proc_dir_entry *dev_root);
+
+#if __HAVE_SG
+ /* Scatter Gather Support (drm_scatter.h) */
+extern void DRM(sg_cleanup)(drm_sg_mem_t *entry);
#endif
+
+#if __REALLY_HAVE_SG
+ /* ATI PCIGART support (ati_pcigart.h) */
+extern int DRM(ati_pcigart_init)(drm_device_t *dev,
+ unsigned long *addr,
+ dma_addr_t *bus_addr);
+extern int DRM(ati_pcigart_cleanup)(drm_device_t *dev,
+ unsigned long addr,
+ dma_addr_t bus_addr);
#endif
+
+#endif /* __KERNEL__ */
#endif
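The rewritten drmP.h switches the BSD port over to the shared "template" scheme already used by the Linux tree: every core entry point is declared through the DRM() name-mangling macro, and the per-driver feature switches (__HAVE_AGP, __HAVE_DMA, __HAVE_CTX_BITMAP, ...) default to 0 so a driver only enables what it implements. A minimal sketch of the name pasting follows; the "radeon" prefix and the function name are made up for illustration.

#include <stdio.h>

/* Each driver defines DRM(x) before pulling in the shared templates; the
 * "radeon" prefix here is just an example. */
#define DRM(x) radeon_##x

static int DRM(example_init)(void)      /* expands to radeon_example_init() */
{
	printf("radeon_example_init called\n");
	return 0;
}

int main(void)
{
	return DRM(example_init)();
}

Because the expansion happens at compile time, the same drm_auth/drm_bufs/... template sources can be built once per driver without symbol clashes when several DRM modules are loaded.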
diff --git a/bsd-core/drm_agpsupport.c b/bsd-core/drm_agpsupport.c
new file mode 100644
index 00000000..ac12c867
--- /dev/null
+++ b/bsd-core/drm_agpsupport.c
@@ -0,0 +1,326 @@
+/* drm_agpsupport.h -- DRM support for AGP/GART backend -*- linux-c -*-
+ * Created: Mon Dec 13 09:56:45 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#include "drmP.h"
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#if __REALLY_HAVE_AGP
+#include <sys/agpio.h>
+#endif
+
+int DRM(agp_info)(DRM_OS_IOCTL)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ struct agp_info *kern;
+ drm_agp_info_t info;
+
+ if (!dev->agp || !dev->agp->acquired) return EINVAL;
+
+ kern = &dev->agp->info;
+ agp_get_info(dev->agp->agpdev, kern);
+ info.agp_version_major = 1;
+ info.agp_version_minor = 0;
+ info.mode = kern->ai_mode;
+ info.aperture_base = kern->ai_aperture_base;
+ info.aperture_size = kern->ai_aperture_size;
+ info.memory_allowed = kern->ai_memory_allowed;
+ info.memory_used = kern->ai_memory_used;
+ info.id_vendor = kern->ai_devid & 0xffff;
+ info.id_device = kern->ai_devid >> 16;
+
+ *(drm_agp_info_t *) data = info;
+ return 0;
+}
+
+int DRM(agp_acquire)(DRM_OS_IOCTL)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ int retcode;
+
+ if (!dev->agp || dev->agp->acquired) return EINVAL;
+ retcode = agp_acquire(dev->agp->agpdev);
+ if (retcode) return retcode;
+ dev->agp->acquired = 1;
+ return 0;
+}
+
+int DRM(agp_release)(DRM_OS_IOCTL)
+{
+ drm_device_t *dev = kdev->si_drv1;
+
+ if (!dev->agp || !dev->agp->acquired)
+ return EINVAL;
+ agp_release(dev->agp->agpdev);
+ dev->agp->acquired = 0;
+ return 0;
+
+}
+
+void DRM(agp_do_release)(void)
+{
+ device_t agpdev;
+
+ agpdev = agp_find_device();
+ if (agpdev)
+ agp_release(agpdev);
+}
+
+int DRM(agp_enable)(DRM_OS_IOCTL)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_agp_mode_t mode;
+
+ if (!dev->agp || !dev->agp->acquired) return EINVAL;
+
+ mode = *(drm_agp_mode_t *) data;
+
+ dev->agp->mode = mode.mode;
+ agp_enable(dev->agp->agpdev, mode.mode);
+ dev->agp->base = dev->agp->info.ai_aperture_base;
+ dev->agp->enabled = 1;
+ return 0;
+}
+
+int DRM(agp_alloc)(DRM_OS_IOCTL)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_agp_buffer_t request;
+ drm_agp_mem_t *entry;
+ void *handle;
+ unsigned long pages;
+ u_int32_t type;
+ struct agp_memory_info info;
+
+ if (!dev->agp || !dev->agp->acquired) return EINVAL;
+
+ request = *(drm_agp_buffer_t *) data;
+
+ if (!(entry = DRM(alloc)(sizeof(*entry), DRM_MEM_AGPLISTS)))
+ return ENOMEM;
+
+ bzero(entry, sizeof(*entry));
+
+ pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
+ type = (u_int32_t) request.type;
+
+ if (!(handle = DRM(alloc_agp)(pages, type))) {
+ DRM(free)(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+ return ENOMEM;
+ }
+
+ entry->handle = handle;
+ entry->bound = 0;
+ entry->pages = pages;
+ entry->prev = NULL;
+ entry->next = dev->agp->memory;
+ if (dev->agp->memory) dev->agp->memory->prev = entry;
+ dev->agp->memory = entry;
+
+ agp_memory_info(dev->agp->agpdev, entry->handle, &info);
+
+ request.handle = (unsigned long) entry->handle;
+ request.physical = info.ami_physical;
+
+ *(drm_agp_buffer_t *) data = request;
+
+ return 0;
+}
+
+static drm_agp_mem_t * DRM(agp_lookup_entry)(drm_device_t *dev, void *handle)
+{
+ drm_agp_mem_t *entry;
+
+ for (entry = dev->agp->memory; entry; entry = entry->next) {
+ if (entry->handle == handle) return entry;
+ }
+ return NULL;
+}
+
+int DRM(agp_unbind)(DRM_OS_IOCTL)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_agp_binding_t request;
+ drm_agp_mem_t *entry;
+ int retcode;
+
+ if (!dev->agp || !dev->agp->acquired) return EINVAL;
+ request = *(drm_agp_binding_t *) data;
+ if (!(entry = DRM(agp_lookup_entry)(dev, (void *) request.handle)))
+ return EINVAL;
+ if (!entry->bound) return EINVAL;
+ retcode=DRM(unbind_agp)(entry->handle);
+ if (!retcode)
+ {
+ entry->bound=0;
+ return 0;
+ }
+ else
+ return retcode;
+}
+
+int DRM(agp_bind)(DRM_OS_IOCTL)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_agp_binding_t request;
+ drm_agp_mem_t *entry;
+ int retcode;
+ int page;
+
+ DRM_DEBUG("agp_bind, page_size=%x\n", PAGE_SIZE);
+ if (!dev->agp || !dev->agp->acquired)
+ return EINVAL;
+ request = *(drm_agp_binding_t *) data;
+ if (!(entry = DRM(agp_lookup_entry)(dev, (void *) request.handle)))
+ return EINVAL;
+ if (entry->bound) return EINVAL;
+ page = (request.offset + PAGE_SIZE - 1) / PAGE_SIZE;
+ if ((retcode = DRM(bind_agp)(entry->handle, page)))
+ return retcode;
+ entry->bound = dev->agp->base + (page << PAGE_SHIFT);
+ return 0;
+}
+
+int DRM(agp_free)(DRM_OS_IOCTL)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_agp_buffer_t request;
+ drm_agp_mem_t *entry;
+
+ if (!dev->agp || !dev->agp->acquired) return EINVAL;
+ request = *(drm_agp_buffer_t *) data;
+ if (!(entry = DRM(agp_lookup_entry)(dev, (void*) request.handle)))
+ return EINVAL;
+ if (entry->bound) DRM(unbind_agp)(entry->handle);
+
+ if (entry->prev) entry->prev->next = entry->next;
+ else dev->agp->memory = entry->next;
+ if (entry->next) entry->next->prev = entry->prev;
+ DRM(free_agp)(entry->handle, entry->pages);
+ DRM(free)(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+ return 0;
+}
+
+drm_agp_head_t *DRM(agp_init)(void)
+{
+ device_t agpdev;
+ drm_agp_head_t *head = NULL;
+ int agp_available = 1;
+
+ agpdev = agp_find_device();
+ if (!agpdev)
+ agp_available = 0;
+
+ DRM_DEBUG("agp_available = %d\n", agp_available);
+
+ if (agp_available) {
+ if (!(head = DRM(alloc)(sizeof(*head), DRM_MEM_AGPLISTS)))
+ return NULL;
+ bzero((void *)head, sizeof(*head));
+ head->agpdev = agpdev;
+ agp_get_info(agpdev, &head->info);
+ head->memory = NULL;
+#if 0 /* bogus */
+ switch (head->agp_info.chipset) {
+ case INTEL_GENERIC: head->chipset = "Intel"; break;
+ case INTEL_LX: head->chipset = "Intel 440LX"; break;
+ case INTEL_BX: head->chipset = "Intel 440BX"; break;
+ case INTEL_GX: head->chipset = "Intel 440GX"; break;
+ case INTEL_I810: head->chipset = "Intel i810"; break;
+ case VIA_GENERIC: head->chipset = "VIA"; break;
+ case VIA_VP3: head->chipset = "VIA VP3"; break;
+ case VIA_MVP3: head->chipset = "VIA MVP3"; break;
+ case VIA_APOLLO_PRO: head->chipset = "VIA Apollo Pro"; break;
+ case SIS_GENERIC: head->chipset = "SiS"; break;
+ case AMD_GENERIC: head->chipset = "AMD"; break;
+ case AMD_IRONGATE: head->chipset = "AMD Irongate"; break;
+ case ALI_GENERIC: head->chipset = "ALi"; break;
+ case ALI_M1541: head->chipset = "ALi M1541"; break;
+ default:
+ }
+#endif
+ DRM_INFO("AGP at 0x%08x %dMB\n",
+ head->info.ai_aperture_base,
+ head->info.ai_aperture_size >> 20);
+ }
+ return head;
+}
+
+void DRM(agp_uninit)(void)
+{
+/* FIXME: What goes here */
+}
+
+
+agp_memory *DRM(agp_allocate_memory)(size_t pages, u32 type)
+{
+ device_t agpdev;
+
+ agpdev = agp_find_device();
+ if (!agpdev)
+ return NULL;
+
+ return agp_alloc_memory(agpdev, type, pages << AGP_PAGE_SHIFT);
+}
+
+int DRM(agp_free_memory)(agp_memory *handle)
+{
+ device_t agpdev;
+
+ agpdev = agp_find_device();
+ if (!agpdev || !handle)
+ return 0;
+
+ agp_free_memory(agpdev, handle);
+ return 1;
+}
+
+int DRM(agp_bind_memory)(agp_memory *handle, off_t start)
+{
+ device_t agpdev;
+
+ agpdev = agp_find_device();
+ if (!agpdev || !handle)
+ return EINVAL;
+
+ return agp_bind_memory(agpdev, handle, start * PAGE_SIZE);
+}
+
+int DRM(agp_unbind_memory)(agp_memory *handle)
+{
+ device_t agpdev;
+
+ agpdev = agp_find_device();
+ if (!agpdev || !handle)
+ return EINVAL;
+
+ return agp_unbind_memory(agpdev, handle);
+}
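For orientation, the ioctl handlers above correspond to a user-space sequence of acquire, enable, alloc, and bind. The hedged sketch below shows that sequence from a client's point of view; the device node, the AGP mode word, and the include path are assumptions, while the DRM_IOCTL_AGP_* request codes and drm_agp_*_t structures are taken to come from the drm.h shipped in this tree.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"        /* drm_agp_*_t and DRM_IOCTL_AGP_*; path is an assumption */

int main(void)
{
	drm_agp_mode_t    mode;
	drm_agp_buffer_t  buf;
	drm_agp_binding_t bind;
	int fd = open("/dev/dri/card0", O_RDWR);   /* device node is an assumption */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, DRM_IOCTL_AGP_ACQUIRE, 0) != 0) {    /* DRM(agp_acquire) */
		perror("AGP_ACQUIRE");
		return 1;
	}
	mode.mode = 0x1f000207;                            /* assumed AGP mode word */
	if (ioctl(fd, DRM_IOCTL_AGP_ENABLE, &mode) != 0) { /* DRM(agp_enable) */
		perror("AGP_ENABLE");
		return 1;
	}
	memset(&buf, 0, sizeof(buf));
	buf.size = 1 << 20;                                /* ask for 1 MB */
	if (ioctl(fd, DRM_IOCTL_AGP_ALLOC, &buf) != 0) {   /* DRM(agp_alloc) */
		perror("AGP_ALLOC");
		return 1;
	}
	bind.handle = buf.handle;      /* opaque handle returned by ALLOC */
	bind.offset = 0;               /* byte offset into the aperture */
	if (ioctl(fd, DRM_IOCTL_AGP_BIND, &bind) != 0) {   /* DRM(agp_bind) */
		perror("AGP_BIND");
		return 1;
	}
	printf("AGP memory bound at aperture offset 0, physical 0x%lx\n",
	       (unsigned long)buf.physical);
	return 0;
}

The kernel side rounds the requested offset up to a page, so bind.offset maps to the page computation in DRM(agp_bind) above and the returned binding address is dev->agp->base plus that page offset.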
diff --git a/bsd-core/drm_auth.c b/bsd-core/drm_auth.c
new file mode 100644
index 00000000..f2c2d8da
--- /dev/null
+++ b/bsd-core/drm_auth.c
@@ -0,0 +1,166 @@
+/* drm_auth.h -- IOCTLs for authentication -*- linux-c -*-
+ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+static int DRM(hash_magic)(drm_magic_t magic)
+{
+ return magic & (DRM_HASH_SIZE-1);
+}
+
+static drm_file_t *DRM(find_file)(drm_device_t *dev, drm_magic_t magic)
+{
+ drm_file_t *retval = NULL;
+ drm_magic_entry_t *pt;
+ int hash = DRM(hash_magic)(magic);
+
+ DRM_OS_LOCK;
+ for (pt = dev->magiclist[hash].head; pt; pt = pt->next) {
+ if (pt->magic == magic) {
+ retval = pt->priv;
+ break;
+ }
+ }
+ DRM_OS_UNLOCK;
+ return retval;
+}
+
+int DRM(add_magic)(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic)
+{
+ int hash;
+ drm_magic_entry_t *entry;
+
+ DRM_DEBUG("%d\n", magic);
+
+ hash = DRM(hash_magic)(magic);
+ entry = (drm_magic_entry_t*) DRM(alloc)(sizeof(*entry), DRM_MEM_MAGIC);
+ if (!entry) DRM_OS_RETURN(ENOMEM);
+ entry->magic = magic;
+ entry->priv = priv;
+ entry->next = NULL;
+
+ DRM_OS_LOCK;
+ if (dev->magiclist[hash].tail) {
+ dev->magiclist[hash].tail->next = entry;
+ dev->magiclist[hash].tail = entry;
+ } else {
+ dev->magiclist[hash].head = entry;
+ dev->magiclist[hash].tail = entry;
+ }
+ DRM_OS_UNLOCK;
+
+ return 0;
+}
+
+int DRM(remove_magic)(drm_device_t *dev, drm_magic_t magic)
+{
+ drm_magic_entry_t *prev = NULL;
+ drm_magic_entry_t *pt;
+ int hash;
+
+ DRM_DEBUG("%d\n", magic);
+ hash = DRM(hash_magic)(magic);
+
+ DRM_OS_LOCK;
+ for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) {
+ if (pt->magic == magic) {
+ if (dev->magiclist[hash].head == pt) {
+ dev->magiclist[hash].head = pt->next;
+ }
+ if (dev->magiclist[hash].tail == pt) {
+ dev->magiclist[hash].tail = prev;
+ }
+ if (prev) {
+ prev->next = pt->next;
+ }
+ DRM_OS_UNLOCK;
+ DRM(free)(pt, sizeof(*pt), DRM_MEM_MAGIC);
+ return 0;
+ }
+ }
+ DRM_OS_UNLOCK;
+
+ DRM(free)(pt, sizeof(*pt), DRM_MEM_MAGIC);
+ DRM_OS_RETURN(EINVAL);
+}
+
+int DRM(getmagic)(DRM_OS_IOCTL)
+{
+ static drm_magic_t sequence = 0;
+ drm_auth_t auth;
+ static DRM_OS_SPINTYPE lock;
+ static int first = 1;
+ DRM_OS_DEVICE;
+ DRM_OS_PRIV;
+
+ if (first) {
+ DRM_OS_SPININIT(lock, "drm getmagic");
+ first = 0;
+ }
+
+ /* Find unique magic */
+ if (priv->magic) {
+ auth.magic = priv->magic;
+ } else {
+ do {
+ DRM_OS_SPINLOCK(&lock);
+ if (!sequence) ++sequence; /* reserve 0 */
+ auth.magic = sequence++;
+ DRM_OS_SPINUNLOCK(&lock);
+ } while (DRM(find_file)(dev, auth.magic));
+ priv->magic = auth.magic;
+ DRM(add_magic)(dev, priv, auth.magic);
+ }
+
+ DRM_DEBUG("%u\n", auth.magic);
+
+ DRM_OS_KRNTOUSR((drm_auth_t *)data, auth, sizeof(auth));
+
+ return 0;
+}
+
+int DRM(authmagic)(DRM_OS_IOCTL)
+{
+ drm_auth_t auth;
+ drm_file_t *file;
+ DRM_OS_DEVICE;
+
+ DRM_OS_KRNFROMUSR(auth, (drm_auth_t *)data, sizeof(auth));
+
+ DRM_DEBUG("%u\n", auth.magic);
+ if ((file = DRM(find_file)(dev, auth.magic))) {
+ file->authenticated = 1;
+ DRM(remove_magic)(dev, auth.magic);
+ return 0;
+ }
+ DRM_OS_RETURN(EINVAL);
+}
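The code above is the kernel half of the magic-cookie handshake: DRM(getmagic) hands a per-open magic to an unauthenticated client, and DRM(authmagic) lets the privileged master mark that client as authenticated. A hedged user-space sketch of the two ioctls follows; both ends run in one process purely for illustration, and the device node and include path are assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include "drm.h"        /* drm_auth_t, DRM_IOCTL_GET_MAGIC, DRM_IOCTL_AUTH_MAGIC */

int main(void)
{
	drm_auth_t auth;
	int client = open("/dev/dri/card0", O_RDWR);  /* unprivileged 3D client */
	int master = open("/dev/dri/card0", O_RDWR);  /* stands in for the X server */

	if (client < 0 || master < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(client, DRM_IOCTL_GET_MAGIC, &auth) != 0) {  /* DRM(getmagic) */
		perror("GET_MAGIC");
		return 1;
	}
	/* In the real protocol the magic travels to the server via the DRI
	 * X extension rather than staying inside one process. */
	if (ioctl(master, DRM_IOCTL_AUTH_MAGIC, &auth) != 0) { /* DRM(authmagic) */
		perror("AUTH_MAGIC");
		return 1;
	}
	printf("client fd authenticated with magic %u\n", (unsigned)auth.magic);
	return 0;
}

Once the master has authenticated the magic, the client's drm_file_t gains authenticated = 1 and the restricted ioctls become available on that file descriptor.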
diff --git a/bsd-core/drm_bufs.c b/bsd-core/drm_bufs.c
new file mode 100644
index 00000000..d55b36d8
--- /dev/null
+++ b/bsd-core/drm_bufs.c
@@ -0,0 +1,1092 @@
+/* drm_bufs.h -- Generic buffer template -*- linux-c -*-
+ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#define __NO_VERSION__
+#include <machine/param.h>
+#include <sys/mman.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_map.h>
+#include <vm/vm_param.h>
+#include "drmP.h"
+
+#ifndef __HAVE_PCI_DMA
+#define __HAVE_PCI_DMA 0
+#endif
+
+#ifndef __HAVE_SG
+#define __HAVE_SG 0
+#endif
+
+#ifndef DRIVER_BUF_PRIV_T
+#define DRIVER_BUF_PRIV_T u32
+#endif
+#ifndef DRIVER_AGP_BUFFERS_MAP
+#if __HAVE_AGP && __HAVE_DMA
+#error "You must define DRIVER_AGP_BUFFERS_MAP()"
+#else
+#define DRIVER_AGP_BUFFERS_MAP( dev ) NULL
+#endif
+#endif
+
+/*
+ * Compute the binary order (log2, rounded up) of a buffer size.
+ * Can be made faster.
+ */
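+/* For example, DRM(order)(PAGE_SIZE) == PAGE_SHIFT, while
+ * DRM(order)(PAGE_SIZE + 1) == PAGE_SHIFT + 1.
+ */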
+int DRM(order)( unsigned long size )
+{
+ int order;
+ unsigned long tmp;
+
+ for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );
+
+ if ( size & ~(1 << order) )
+ ++order;
+
+ return order;
+}
+
+int DRM(addmap)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_map_t *map;
+ drm_map_list_entry_t *list;
+
+ if (!(dev->flags & (FREAD|FWRITE)))
+ DRM_OS_RETURN(EACCES); /* Require read/write */
+
+ map = (drm_map_t *) DRM(alloc)( sizeof(*map), DRM_MEM_MAPS );
+ if ( !map )
+ DRM_OS_RETURN(ENOMEM);
+
+ *map = *(drm_map_t *)data;
+
+	/* Only allow shared memory maps to be removable, since we only keep
+	 * enough bookkeeping information about shared memory to allow for
+	 * removal when processes fork.
+	 */
+ if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
+ DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
+ DRM_OS_RETURN(EINVAL);
+ }
+ DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
+ map->offset, map->size, map->type );
+ if ( (map->offset & PAGE_MASK) || (map->size & PAGE_MASK) ) {
+ DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
+ DRM_OS_RETURN(EINVAL);
+ }
+ map->mtrr = -1;
+ map->handle = 0;
+
+ TAILQ_FOREACH(list, dev->maplist, link) {
+ drm_map_t *entry = list->map;
+ if ( (entry->offset >= map->offset
+ && (entry->offset) < (map->offset + map->size) )
+ || ((entry->offset + entry->size) >= map->offset
+ && (entry->offset + entry->size) < (map->offset + map->size) )
+ || ((entry->offset < map->offset)
+ && (entry->offset + entry->size) >= (map->offset + map->size) ) )
+		    DRM_DEBUG("map collision: current(0x%lx-0x%lx), add(0x%lx-0x%lx)\n",
+ entry->offset, entry->offset + entry->size - 1,
+ map->offset, map->offset + map->size - 1);
+ }
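+	/* Overlapping maps are only reported here; the new map is still
+	 * added below even when it collides with an existing one.
+	 */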
+
+ switch ( map->type ) {
+ case _DRM_REGISTERS:
+ case _DRM_FRAME_BUFFER:
+#if !defined(__sparc__) && !defined(__alpha__)
+		if ( map->offset + map->size < map->offset ) {
+ DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
+ DRM_OS_RETURN(EINVAL);
+ }
+#endif
+#ifdef __alpha__
+ map->offset += dev->hose->mem_space->start;
+#endif
+#if __REALLY_HAVE_MTRR
+ if ( map->type == _DRM_FRAME_BUFFER ||
+ (map->flags & _DRM_WRITE_COMBINING) ) {
+ map->mtrr = mtrr_add( map->offset, map->size,
+ MTRR_TYPE_WRCOMB, 1 );
+ }
+#endif
+ map->handle = DRM(ioremap)( map->offset, map->size );
+ break;
+
+ case _DRM_SHM:
+ DRM_INFO( "%ld %d %d\n",
+ map->size, DRM(order)( map->size ), PAGE_SHIFT);
+ map->handle = (void *)DRM(alloc_pages)
+ (DRM(order)(map->size) - PAGE_SHIFT, DRM_MEM_SAREA);
+ DRM_DEBUG( "%ld %d %p\n",
+ map->size, DRM(order)( map->size ), map->handle );
+ if ( !map->handle ) {
+ DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
+ DRM_OS_RETURN(ENOMEM);
+ }
+ map->offset = (unsigned long)map->handle;
+ if ( map->flags & _DRM_CONTAINS_LOCK ) {
+ dev->lock.hw_lock = map->handle; /* Pointer to lock */
+ }
+ break;
+#if __REALLY_HAVE_AGP
+ case _DRM_AGP:
+#ifdef __alpha__
+ map->offset += dev->hose->mem_space->start;
+#endif
+ map->offset += dev->agp->base;
+ map->mtrr = dev->agp->agp_mtrr; /* for getmap */
+ break;
+#endif
+ case _DRM_SCATTER_GATHER:
+ if (!dev->sg) {
+ DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
+ DRM_OS_RETURN(EINVAL);
+ }
+ map->offset = map->offset + dev->sg->handle;
+ break;
+
+ default:
+ DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ list = DRM(alloc)(sizeof(*list), DRM_MEM_MAPS);
+ if(!list) {
+ DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
+ DRM_OS_RETURN(EINVAL);
+ }
+ memset(list, 0, sizeof(*list));
+ list->map = map;
+
+ DRM_OS_LOCK;
+ TAILQ_INSERT_TAIL(dev->maplist, list, link);
+ DRM_OS_UNLOCK;
+
+ *(drm_map_t *)data = *map;
+
+ if ( map->type != _DRM_SHM ) {
+ ((drm_map_t *)data)->handle = (void *)map->offset;
+ }
+ return 0;
+}
+
+
+/* Remove a map from the map list and deallocate its resources if the
+ * mapping is no longer in use.
+ */
+
+int DRM(rmmap)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_map_list_entry_t *list;
+ drm_map_t *map;
+ drm_map_t request;
+ int found_maps = 0;
+
+ DRM_OS_KRNFROMUSR( request, (drm_map_t *)data, sizeof(request) );
+
+ DRM_OS_LOCK;
+ TAILQ_FOREACH(list, dev->maplist, link) {
+ map = list->map;
+ if(map->handle == request.handle &&
+ map->flags & _DRM_REMOVABLE) break;
+ }
+
+	/* We walked the whole list without finding a matching removable map.
+	 */
+ if(list == NULL) {
+ DRM_OS_UNLOCK;
+ DRM_OS_RETURN(EINVAL);
+ }
+ TAILQ_REMOVE(dev->maplist, list, link);
+ DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);
+
+
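+	/* found_maps is never incremented above, so the map's backing
+	 * resources are always released here.
+	 */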
+ if(!found_maps) {
+ switch (map->type) {
+ case _DRM_REGISTERS:
+ case _DRM_FRAME_BUFFER:
+#if __REALLY_HAVE_MTRR
+ if (map->mtrr >= 0) {
+ int retcode;
+ retcode = mtrr_del(map->mtrr,
+ map->offset,
+ map->size);
+ DRM_DEBUG("mtrr_del = %d\n", retcode);
+ }
+#endif
+ DRM(ioremapfree)(map->handle, map->size);
+ break;
+ case _DRM_SHM:
+ DRM(free_pages)( (unsigned long)map->handle, DRM(order)(map->size), DRM_MEM_SAREA );
+ break;
+ case _DRM_AGP:
+ case _DRM_SCATTER_GATHER:
+ break;
+ }
+ DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
+ }
+ DRM_OS_UNLOCK;
+ return 0;
+}
+
+#if __HAVE_DMA
+
+
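+/* Undo a partially completed addbufs: free any DMA segments and buffer
+ * bookkeeping that were allocated before the failure.
+ */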
+static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
+{
+ int i;
+
+ if (entry->seg_count) {
+ for (i = 0; i < entry->seg_count; i++) {
+ DRM(free_pages)(entry->seglist[i],
+ entry->page_order,
+ DRM_MEM_DMA);
+ }
+ DRM(free)(entry->seglist,
+ entry->seg_count *
+ sizeof(*entry->seglist),
+ DRM_MEM_SEGS);
+
+ entry->seg_count = 0;
+ }
+
+ if(entry->buf_count) {
+ for(i = 0; i < entry->buf_count; i++) {
+ if(entry->buflist[i].dev_private) {
+ DRM(free)(entry->buflist[i].dev_private,
+ entry->buflist[i].dev_priv_size,
+ DRM_MEM_BUFS);
+ }
+ }
+ DRM(free)(entry->buflist,
+ entry->buf_count *
+ sizeof(*entry->buflist),
+ DRM_MEM_BUFS);
+
+#if __HAVE_DMA_FREELIST
+ DRM(freelist_destroy)(&entry->freelist);
+#endif
+
+ entry->buf_count = 0;
+ }
+}
+
+#if __REALLY_HAVE_AGP
+int DRM(addbufs_agp)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t request;
+ drm_buf_entry_t *entry;
+ drm_buf_t *buf;
+ unsigned long offset;
+ unsigned long agp_offset;
+ int count;
+ int order;
+ int size;
+ int alignment;
+ int page_order;
+ int total;
+ int byte_count;
+ int i;
+ drm_buf_t **temp_buflist;
+
+ if ( !dma ) DRM_OS_RETURN(EINVAL);
+
+ DRM_OS_KRNFROMUSR( request, (drm_buf_desc_t *)data, sizeof(request) );
+
+ count = request.count;
+ order = DRM(order)( request.size );
+ size = 1 << order;
+
+ alignment = (request.flags & _DRM_PAGE_ALIGN)
+ ? round_page(size) : size;
+ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
+
+ byte_count = 0;
+ agp_offset = dev->agp->base + request.agp_start;
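+	/* AGP buffers are carved directly out of the aperture starting at
+	 * request.agp_start; no backing pages are allocated here.
+	 */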
+
+ DRM_DEBUG( "count: %d\n", count );
+ DRM_DEBUG( "order: %d\n", order );
+ DRM_DEBUG( "size: %d\n", size );
+ DRM_DEBUG( "agp_offset: 0x%lx\n", agp_offset );
+ DRM_DEBUG( "alignment: %d\n", alignment );
+ DRM_DEBUG( "page_order: %d\n", page_order );
+ DRM_DEBUG( "total: %d\n", total );
+
+ if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
+ DRM_OS_RETURN(EINVAL);
+ if ( dev->queue_count )
+ DRM_OS_RETURN(EBUSY); /* Not while in use */
+
+ DRM_OS_SPINLOCK( &dev->count_lock );
+ if ( dev->buf_use ) {
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+ DRM_OS_RETURN(EBUSY);
+ }
+ atomic_inc( &dev->buf_alloc );
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+
+ DRM_OS_LOCK;
+ entry = &dma->bufs[order];
+ if ( entry->buf_count ) {
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(ENOMEM); /* May only call once for each order */
+ }
+
+ if (count < 0 || count > 4096) {
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS );
+ if ( !entry->buflist ) {
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(ENOMEM);
+ }
+ memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
+
+ entry->buf_size = size;
+ entry->page_order = page_order;
+
+ offset = 0;
+
+ while ( entry->buf_count < count ) {
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
+
+ buf->offset = (dma->byte_count + offset);
+ buf->bus_address = agp_offset + offset;
+ buf->address = (void *)(agp_offset + offset);
+ buf->next = NULL;
+ buf->waiting = 0;
+ buf->pending = 0;
+ buf->dma_wait = 0;
+ buf->pid = 0;
+
+ buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
+ buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
+ DRM_MEM_BUFS );
+		if(!buf->dev_private) {
+			/* Set count correctly so we free the proper amount. */
+			entry->buf_count = count;
+			DRM(cleanup_buf_error)(entry);
+			DRM_OS_UNLOCK;
+			atomic_dec( &dev->buf_alloc );
+			DRM_OS_RETURN(ENOMEM);
+		}
+ memset( buf->dev_private, 0, buf->dev_priv_size );
+
+#if __HAVE_DMA_HISTOGRAM
+ buf->time_queued = 0;
+ buf->time_dispatched = 0;
+ buf->time_completed = 0;
+ buf->time_freed = 0;
+#endif
+
+ offset += alignment;
+ entry->buf_count++;
+ byte_count += PAGE_SIZE << page_order;
+ }
+
+ DRM_DEBUG( "byte_count: %d\n", byte_count );
+
+ temp_buflist = DRM(realloc)( dma->buflist,
+ dma->buf_count * sizeof(*dma->buflist),
+ (dma->buf_count + entry->buf_count)
+ * sizeof(*dma->buflist),
+ DRM_MEM_BUFS );
+ if(!temp_buflist) {
+ /* Free the entry because it isn't valid */
+ DRM(cleanup_buf_error)(entry);
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(ENOMEM);
+ }
+ dma->buflist = temp_buflist;
+
+ for ( i = 0 ; i < entry->buf_count ; i++ ) {
+ dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+ }
+
+ dma->buf_count += entry->buf_count;
+ dma->byte_count += byte_count;
+
+ DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
+ DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
+
+#if __HAVE_DMA_FREELIST
+ DRM(freelist_create)( &entry->freelist, entry->buf_count );
+ for ( i = 0 ; i < entry->buf_count ; i++ ) {
+ DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
+ }
+#endif
+ DRM_OS_UNLOCK;
+
+ request.count = entry->buf_count;
+ request.size = size;
+
+ DRM_OS_KRNTOUSR( (drm_buf_desc_t *)data, request, sizeof(request) );
+
+ dma->flags = _DRM_DMA_USE_AGP;
+
+ atomic_dec( &dev->buf_alloc );
+ return 0;
+}
+#endif /* __REALLY_HAVE_AGP */
+
+#if __HAVE_PCI_DMA
+int DRM(addbufs_pci)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t request;
+ int count;
+ int order;
+ int size;
+ int total;
+ int page_order;
+ drm_buf_entry_t *entry;
+ unsigned long page;
+ drm_buf_t *buf;
+ int alignment;
+ unsigned long offset;
+ int i;
+ int byte_count;
+ int page_count;
+ unsigned long *temp_pagelist;
+ drm_buf_t **temp_buflist;
+
+ if ( !dma ) DRM_OS_RETURN(EINVAL);
+
+ DRM_OS_KRNFROMUSR( request, (drm_buf_desc_t *)data, sizeof(request) );
+
+ count = request.count;
+ order = DRM(order)( request.size );
+ size = 1 << order;
+
+ DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
+ request.count, request.size, size,
+ order, dev->queue_count );
+
+ if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
+ DRM_OS_RETURN(EINVAL);
+ if ( dev->queue_count )
+ DRM_OS_RETURN(EBUSY); /* Not while in use */
+
+ alignment = (request.flags & _DRM_PAGE_ALIGN)
+ ? round_page(size) : size;
+ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
+
+ DRM_OS_SPINLOCK( &dev->count_lock );
+ if ( dev->buf_use ) {
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+ DRM_OS_RETURN(EBUSY);
+ }
+ atomic_inc( &dev->buf_alloc );
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+
+ DRM_OS_LOCK;
+ entry = &dma->bufs[order];
+ if ( entry->buf_count ) {
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(ENOMEM); /* May only call once for each order */
+ }
+
+ if (count < 0 || count > 4096) {
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS );
+ if ( !entry->buflist ) {
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(ENOMEM);
+ }
+ memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
+
+ entry->seglist = DRM(alloc)( count * sizeof(*entry->seglist),
+ DRM_MEM_SEGS );
+ if ( !entry->seglist ) {
+ DRM(free)( entry->buflist,
+ count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS );
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(ENOMEM);
+ }
+ memset( entry->seglist, 0, count * sizeof(*entry->seglist) );
+
+ temp_pagelist = DRM(realloc)( dma->pagelist,
+ dma->page_count * sizeof(*dma->pagelist),
+ (dma->page_count + (count << page_order))
+ * sizeof(*dma->pagelist),
+ DRM_MEM_PAGES );
+ if(!temp_pagelist) {
+ DRM(free)( entry->buflist,
+ count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS );
+ DRM(free)( entry->seglist,
+ count * sizeof(*entry->seglist),
+ DRM_MEM_SEGS );
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(ENOMEM);
+ }
+
+ dma->pagelist = temp_pagelist;
+ DRM_DEBUG( "pagelist: %d entries\n",
+ dma->page_count + (count << page_order) );
+
+ entry->buf_size = size;
+ entry->page_order = page_order;
+ byte_count = 0;
+ page_count = 0;
+
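+	/* Allocate contiguous segments of 2^page_order pages and slice each
+	 * one into buffers at `alignment'-sized strides until `count'
+	 * buffers exist.
+	 */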
+ while ( entry->buf_count < count ) {
+ page = DRM(alloc_pages)( page_order, DRM_MEM_DMA );
+ if ( !page ) break;
+ entry->seglist[entry->seg_count++] = page;
+ for ( i = 0 ; i < (1 << page_order) ; i++ ) {
+ DRM_DEBUG( "page %d @ 0x%08lx\n",
+ dma->page_count + page_count,
+ page + PAGE_SIZE * i );
+ dma->pagelist[dma->page_count + page_count++]
+ = page + PAGE_SIZE * i;
+ }
+ for ( offset = 0 ;
+ offset + size <= total && entry->buf_count < count ;
+ offset += alignment, ++entry->buf_count ) {
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
+ buf->offset = (dma->byte_count + byte_count + offset);
+ buf->address = (void *)(page + offset);
+ buf->next = NULL;
+ buf->waiting = 0;
+ buf->pending = 0;
+ buf->dma_wait = 0;
+ buf->pid = 0;
+#if __HAVE_DMA_HISTOGRAM
+ buf->time_queued = 0;
+ buf->time_dispatched = 0;
+ buf->time_completed = 0;
+ buf->time_freed = 0;
+#endif
+ DRM_DEBUG( "buffer %d @ %p\n",
+ entry->buf_count, buf->address );
+ }
+ byte_count += PAGE_SIZE << page_order;
+ }
+
+ temp_buflist = DRM(realloc)( dma->buflist,
+ dma->buf_count * sizeof(*dma->buflist),
+ (dma->buf_count + entry->buf_count)
+ * sizeof(*dma->buflist),
+ DRM_MEM_BUFS );
+ if(!temp_buflist) {
+ /* Free the entry because it isn't valid */
+ DRM(cleanup_buf_error)(entry);
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(ENOMEM);
+ }
+ dma->buflist = temp_buflist;
+
+ for ( i = 0 ; i < entry->buf_count ; i++ ) {
+ dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+ }
+
+ dma->buf_count += entry->buf_count;
+ dma->seg_count += entry->seg_count;
+ dma->page_count += entry->seg_count << page_order;
+ dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
+
+#if __HAVE_DMA_FREELIST
+ DRM(freelist_create)( &entry->freelist, entry->buf_count );
+ for ( i = 0 ; i < entry->buf_count ; i++ ) {
+ DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
+ }
+#endif
+ DRM_OS_UNLOCK;
+
+ request.count = entry->buf_count;
+ request.size = size;
+
+ DRM_OS_KRNTOUSR( (drm_buf_desc_t *)data, request, sizeof(request) );
+
+ atomic_dec( &dev->buf_alloc );
+ return 0;
+}
+#endif /* __HAVE_PCI_DMA */
+
+#if __REALLY_HAVE_SG
+int DRM(addbufs_sg)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t request;
+ drm_buf_entry_t *entry;
+ drm_buf_t *buf;
+ unsigned long offset;
+ unsigned long agp_offset;
+ int count;
+ int order;
+ int size;
+ int alignment;
+ int page_order;
+ int total;
+ int byte_count;
+ int i;
+ drm_buf_t **temp_buflist;
+
+ if ( !dma ) DRM_OS_RETURN(EINVAL);
+
+ DRM_OS_KRNFROMUSR( request, (drm_buf_desc_t *)data, sizeof(request) );
+
+ count = request.count;
+ order = DRM(order)( request.size );
+ size = 1 << order;
+
+ alignment = (request.flags & _DRM_PAGE_ALIGN)
+ ? round_page(size) : size;
+ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
+
+ byte_count = 0;
+ agp_offset = request.agp_start;
+
+ DRM_DEBUG( "count: %d\n", count );
+ DRM_DEBUG( "order: %d\n", order );
+ DRM_DEBUG( "size: %d\n", size );
+ DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
+ DRM_DEBUG( "alignment: %d\n", alignment );
+ DRM_DEBUG( "page_order: %d\n", page_order );
+ DRM_DEBUG( "total: %d\n", total );
+
+ if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
+ DRM_OS_RETURN(EINVAL);
+ if ( dev->queue_count ) DRM_OS_RETURN(EBUSY); /* Not while in use */
+
+ DRM_OS_SPINLOCK( &dev->count_lock );
+ if ( dev->buf_use ) {
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+ DRM_OS_RETURN(EBUSY);
+ }
+ atomic_inc( &dev->buf_alloc );
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+
+ DRM_OS_LOCK;
+ entry = &dma->bufs[order];
+ if ( entry->buf_count ) {
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(ENOMEM); /* May only call once for each order */
+ }
+
+ if (count < 0 || count > 4096) {
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS );
+ if ( !entry->buflist ) {
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(ENOMEM);
+ }
+ memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
+
+ entry->buf_size = size;
+ entry->page_order = page_order;
+
+ offset = 0;
+
+ while ( entry->buf_count < count ) {
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
+
+ buf->offset = (dma->byte_count + offset);
+ buf->bus_address = agp_offset + offset;
+ buf->address = (void *)(agp_offset + offset + dev->sg->handle);
+ buf->next = NULL;
+ buf->waiting = 0;
+ buf->pending = 0;
+ buf->dma_wait = 0;
+ buf->pid = 0;
+
+ buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
+ buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
+ DRM_MEM_BUFS );
+ if(!buf->dev_private) {
+ /* Set count correctly so we free the proper amount. */
+ entry->buf_count = count;
+ DRM(cleanup_buf_error)(entry);
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(ENOMEM);
+ }
+
+ memset( buf->dev_private, 0, buf->dev_priv_size );
+
+# if __HAVE_DMA_HISTOGRAM
+ buf->time_queued = 0;
+ buf->time_dispatched = 0;
+ buf->time_completed = 0;
+ buf->time_freed = 0;
+# endif
+ DRM_DEBUG( "buffer %d @ %p\n",
+ entry->buf_count, buf->address );
+
+ offset += alignment;
+ entry->buf_count++;
+ byte_count += PAGE_SIZE << page_order;
+ }
+
+ DRM_DEBUG( "byte_count: %d\n", byte_count );
+
+ temp_buflist = DRM(realloc)( dma->buflist,
+ dma->buf_count * sizeof(*dma->buflist),
+ (dma->buf_count + entry->buf_count)
+ * sizeof(*dma->buflist),
+ DRM_MEM_BUFS );
+ if(!temp_buflist) {
+ /* Free the entry because it isn't valid */
+ DRM(cleanup_buf_error)(entry);
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(ENOMEM);
+ }
+ dma->buflist = temp_buflist;
+
+ for ( i = 0 ; i < entry->buf_count ; i++ ) {
+ dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+ }
+
+ dma->buf_count += entry->buf_count;
+ dma->byte_count += byte_count;
+
+ DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
+ DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
+
+#if __HAVE_DMA_FREELIST
+ DRM(freelist_create)( &entry->freelist, entry->buf_count );
+ for ( i = 0 ; i < entry->buf_count ; i++ ) {
+ DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
+ }
+#endif
+ DRM_OS_UNLOCK;
+
+ request.count = entry->buf_count;
+ request.size = size;
+
+ DRM_OS_KRNTOUSR( (drm_buf_desc_t *)data, request, sizeof(request) );
+
+ dma->flags = _DRM_DMA_USE_SG;
+
+ atomic_dec( &dev->buf_alloc );
+ return 0;
+}
+#endif /* __REALLY_HAVE_SG */
+
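+/* Dispatch an ADDBUFS request to the AGP, scatter/gather or PCI
+ * implementation according to the request flags.
+ */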
+int DRM(addbufs)( DRM_OS_IOCTL )
+{
+ drm_buf_desc_t request;
+
+ DRM_OS_KRNFROMUSR( request, (drm_buf_desc_t *)data, sizeof(request) );
+
+#if __REALLY_HAVE_AGP
+ if ( request.flags & _DRM_AGP_BUFFER )
+ return DRM(addbufs_agp)( kdev, cmd, data, flags, p );
+ else
+#endif
+#if __REALLY_HAVE_SG
+ if ( request.flags & _DRM_SG_BUFFER )
+ return DRM(addbufs_sg)( kdev, cmd, data, flags, p );
+ else
+#endif
+#if __HAVE_PCI_DMA
+ return DRM(addbufs_pci)( kdev, cmd, data, flags, p );
+#else
+ DRM_OS_RETURN(EINVAL);
+#endif
+}
+
+int DRM(infobufs)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_info_t request;
+ int i;
+ int count;
+
+ if ( !dma ) DRM_OS_RETURN(EINVAL);
+
+ DRM_OS_SPINLOCK( &dev->count_lock );
+ if ( atomic_read( &dev->buf_alloc ) ) {
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+ DRM_OS_RETURN(EBUSY);
+ }
+ ++dev->buf_use; /* Can't allocate more after this call */
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+
+ DRM_OS_KRNFROMUSR( request, (drm_buf_info_t *)data, sizeof(request) );
+
+ for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
+ if ( dma->bufs[i].buf_count ) ++count;
+ }
+
+ DRM_DEBUG( "count = %d\n", count );
+
+ if ( request.count >= count ) {
+ for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
+ if ( dma->bufs[i].buf_count ) {
+ drm_buf_desc_t *to = &request.list[count];
+ drm_buf_entry_t *from = &dma->bufs[i];
+ drm_freelist_t *list = &dma->bufs[i].freelist;
+ if ( DRM_OS_COPYTOUSR( &to->count,
+ &from->buf_count,
+ sizeof(from->buf_count) ) ||
+ DRM_OS_COPYTOUSR( &to->size,
+ &from->buf_size,
+ sizeof(from->buf_size) ) ||
+ DRM_OS_COPYTOUSR( &to->low_mark,
+ &list->low_mark,
+ sizeof(list->low_mark) ) ||
+ DRM_OS_COPYTOUSR( &to->high_mark,
+ &list->high_mark,
+ sizeof(list->high_mark) ) )
+ DRM_OS_RETURN(EFAULT);
+
+ DRM_DEBUG( "%d %d %d %d %d\n",
+ i,
+ dma->bufs[i].buf_count,
+ dma->bufs[i].buf_size,
+ dma->bufs[i].freelist.low_mark,
+ dma->bufs[i].freelist.high_mark );
+ ++count;
+ }
+ }
+ }
+ request.count = count;
+
+ DRM_OS_KRNTOUSR( (drm_buf_info_t *)data, request, sizeof(request) );
+
+ return 0;
+}
+
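+/* DRM(markbufs) sets the freelist low and high water marks for the
+ * buffer pool whose size matches the request.
+ */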
+int DRM(markbufs)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t request;
+ int order;
+ drm_buf_entry_t *entry;
+
+ if ( !dma ) DRM_OS_RETURN(EINVAL);
+
+ DRM_OS_KRNFROMUSR( request, (drm_buf_desc_t *)data, sizeof(request) );
+
+ DRM_DEBUG( "%d, %d, %d\n",
+ request.size, request.low_mark, request.high_mark );
+ order = DRM(order)( request.size );
+ if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
+ DRM_OS_RETURN(EINVAL);
+ entry = &dma->bufs[order];
+
+ if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
+ DRM_OS_RETURN(EINVAL);
+ if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
+ DRM_OS_RETURN(EINVAL);
+
+ entry->freelist.low_mark = request.low_mark;
+ entry->freelist.high_mark = request.high_mark;
+
+ return 0;
+}
+
+int DRM(freebufs)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_free_t request;
+ int i;
+ int idx;
+ drm_buf_t *buf;
+
+ if ( !dma ) DRM_OS_RETURN(EINVAL);
+
+ DRM_OS_KRNFROMUSR( request, (drm_buf_free_t *)data, sizeof(request) );
+
+ DRM_DEBUG( "%d\n", request.count );
+ for ( i = 0 ; i < request.count ; i++ ) {
+ if ( DRM_OS_COPYFROMUSR( &idx,
+ &request.list[i],
+ sizeof(idx) ) )
+ DRM_OS_RETURN(EFAULT);
+ if ( idx < 0 || idx >= dma->buf_count ) {
+ DRM_ERROR( "Index %d (of %d max)\n",
+ idx, dma->buf_count - 1 );
+ DRM_OS_RETURN(EINVAL);
+ }
+ buf = dma->buflist[idx];
+ if ( buf->pid != DRM_OS_CURRENTPID ) {
+ DRM_ERROR( "Process %d freeing buffer owned by %d\n",
+ DRM_OS_CURRENTPID, buf->pid );
+ DRM_OS_RETURN(EINVAL);
+ }
+ DRM(free_buffer)( dev, buf );
+ }
+
+ return 0;
+}
+
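+/* DRM(mapbufs) maps the DMA buffer pool (or the driver's AGP/SG buffer
+ * map) into the calling process and copies out each buffer's index,
+ * size and user-space address.
+ */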
+int DRM(mapbufs)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_device_dma_t *dma = dev->dma;
+ int retcode = 0;
+ const int zero = 0;
+ vm_offset_t virtual, address;
+#if __FreeBSD_version >= 500000
+ struct vmspace *vms = p->td_proc->p_vmspace;
+#else
+ struct vmspace *vms = p->p_vmspace;
+#endif
+ drm_buf_map_t request;
+ int i;
+
+ if ( !dma ) DRM_OS_RETURN(EINVAL);
+
+ DRM_OS_SPINLOCK( &dev->count_lock );
+ if ( atomic_read( &dev->buf_alloc ) ) {
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+ DRM_OS_RETURN(EBUSY);
+ }
+ dev->buf_use++; /* Can't allocate more after this call */
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+
+ DRM_OS_KRNFROMUSR( request, (drm_buf_map_t *)data, sizeof(request) );
+
+ if ( request.count >= dma->buf_count ) {
+ if ( (__HAVE_AGP && (dma->flags & _DRM_DMA_USE_AGP)) ||
+ (__HAVE_SG && (dma->flags & _DRM_DMA_USE_SG)) ) {
+ drm_map_t *map = DRIVER_AGP_BUFFERS_MAP( dev );
+
+ if ( !map ) {
+ retcode = EINVAL;
+ goto done;
+ }
+
+ virtual = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
+ retcode = vm_mmap(&vms->vm_map,
+ &virtual,
+ round_page(map->size),
+ PROT_READ|PROT_WRITE, VM_PROT_ALL,
+ MAP_SHARED,
+ SLIST_FIRST(&kdev->si_hlist),
+ (unsigned long)map->offset );
+ } else {
+ virtual = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
+ retcode = vm_mmap(&vms->vm_map,
+ &virtual,
+ round_page(dma->byte_count),
+ PROT_READ|PROT_WRITE, VM_PROT_ALL,
+ MAP_SHARED,
+ SLIST_FIRST(&kdev->si_hlist),
+ 0);
+ }
+ if (retcode)
+ goto done;
+ request.virtual = (void *)virtual;
+
+ for ( i = 0 ; i < dma->buf_count ; i++ ) {
+ if ( DRM_OS_COPYTOUSR( &request.list[i].idx,
+ &dma->buflist[i]->idx,
+ sizeof(request.list[0].idx) ) ) {
+ retcode = EFAULT;
+ goto done;
+ }
+ if ( DRM_OS_COPYTOUSR( &request.list[i].total,
+ &dma->buflist[i]->total,
+ sizeof(request.list[0].total) ) ) {
+ retcode = EFAULT;
+ goto done;
+ }
+ if ( DRM_OS_COPYTOUSR( &request.list[i].used,
+ &zero,
+ sizeof(zero) ) ) {
+ retcode = EFAULT;
+ goto done;
+ }
+ address = virtual + dma->buflist[i]->offset; /* *** */
+ if ( DRM_OS_COPYTOUSR( &request.list[i].address,
+ &address,
+ sizeof(address) ) ) {
+ retcode = EFAULT;
+ goto done;
+ }
+ }
+ }
+ done:
+ request.count = dma->buf_count;
+
+ DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );
+
+ DRM_OS_KRNTOUSR( (drm_buf_map_t *)data, request, sizeof(request) );
+
+ DRM_OS_RETURN(retcode);
+}
+
+#endif /* __HAVE_DMA */
+
diff --git a/bsd-core/drm_context.c b/bsd-core/drm_context.c
new file mode 100644
index 00000000..8d676a23
--- /dev/null
+++ b/bsd-core/drm_context.c
@@ -0,0 +1,730 @@
+/* drm_context.c -- IOCTLs for generic contexts -*- linux-c -*-
+ * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+#if __HAVE_CTX_BITMAP
+
+/* ================================================================
+ * Context bitmap support
+ */
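+/* Context handles are bits in a page-sized bitmap (at most
+ * DRM_MAX_CTXBITMAP of them); context_sareas grows on demand so every
+ * allocated handle can carry a private SAREA map pointer.
+ */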
+
+void DRM(ctxbitmap_free)( drm_device_t *dev, int ctx_handle )
+{
+ if ( ctx_handle < 0 ) goto failed;
+ if ( !dev->ctx_bitmap ) goto failed;
+
+ if ( ctx_handle < DRM_MAX_CTXBITMAP ) {
+ DRM_OS_LOCK;
+ clear_bit( ctx_handle, dev->ctx_bitmap );
+ dev->context_sareas[ctx_handle] = NULL;
+ DRM_OS_UNLOCK;
+ return;
+ }
+failed:
+ DRM_ERROR( "Attempt to free invalid context handle: %d\n",
+ ctx_handle );
+ return;
+}
+
+int DRM(ctxbitmap_next)( drm_device_t *dev )
+{
+ int bit;
+
+ if(!dev->ctx_bitmap) return -1;
+
+ DRM_OS_LOCK;
+ bit = find_first_zero_bit( dev->ctx_bitmap, DRM_MAX_CTXBITMAP );
+ if ( bit < DRM_MAX_CTXBITMAP ) {
+ set_bit( bit, dev->ctx_bitmap );
+ DRM_DEBUG( "drm_ctxbitmap_next bit : %d\n", bit );
+ if((bit+1) > dev->max_context) {
+ dev->max_context = (bit+1);
+ if(dev->context_sareas) {
+ drm_map_t **ctx_sareas;
+
+ ctx_sareas = DRM(realloc)(dev->context_sareas,
+ (dev->max_context - 1) *
+ sizeof(*dev->context_sareas),
+ dev->max_context *
+ sizeof(*dev->context_sareas),
+ DRM_MEM_MAPS);
+ if(!ctx_sareas) {
+ clear_bit(bit, dev->ctx_bitmap);
+ DRM_OS_UNLOCK;
+ return -1;
+ }
+ dev->context_sareas = ctx_sareas;
+ dev->context_sareas[bit] = NULL;
+ } else {
+ /* max_context == 1 at this point */
+ dev->context_sareas = DRM(alloc)(
+ dev->max_context *
+ sizeof(*dev->context_sareas),
+ DRM_MEM_MAPS);
+ if(!dev->context_sareas) {
+ clear_bit(bit, dev->ctx_bitmap);
+ DRM_OS_UNLOCK;
+ return -1;
+ }
+ dev->context_sareas[bit] = NULL;
+ }
+ }
+ DRM_OS_UNLOCK;
+ return bit;
+ }
+ DRM_OS_UNLOCK;
+ return -1;
+}
+
+int DRM(ctxbitmap_init)( drm_device_t *dev )
+{
+ int i;
+ int temp;
+
+ DRM_OS_LOCK;
+ dev->ctx_bitmap = (unsigned long *) DRM(alloc)( PAGE_SIZE,
+ DRM_MEM_CTXBITMAP );
+ if ( dev->ctx_bitmap == NULL ) {
+ DRM_OS_UNLOCK;
+ DRM_OS_RETURN(ENOMEM);
+ }
+ memset( (void *)dev->ctx_bitmap, 0, PAGE_SIZE );
+ dev->context_sareas = NULL;
+ dev->max_context = -1;
+ DRM_OS_UNLOCK;
+
+ for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
+ temp = DRM(ctxbitmap_next)( dev );
+ DRM_DEBUG( "drm_ctxbitmap_init : %d\n", temp );
+ }
+
+ return 0;
+}
+
+void DRM(ctxbitmap_cleanup)( drm_device_t *dev )
+{
+ DRM_OS_LOCK;
+ if( dev->context_sareas ) DRM(free)( dev->context_sareas,
+ sizeof(*dev->context_sareas) *
+ dev->max_context,
+ DRM_MEM_MAPS );
+ DRM(free)( (void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP );
+ DRM_OS_UNLOCK;
+}
+
+/* ================================================================
+ * Per Context SAREA Support
+ */
+
+int DRM(getsareactx)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_ctx_priv_map_t request;
+ drm_map_t *map;
+
+ DRM_OS_KRNFROMUSR( request, (drm_ctx_priv_map_t *)data,
+ sizeof(request) );
+
+ DRM_OS_LOCK;
+ if (dev->max_context < 0 || request.ctx_id >= (unsigned) dev->max_context) {
+ DRM_OS_UNLOCK;
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ map = dev->context_sareas[request.ctx_id];
+ DRM_OS_UNLOCK;
+
+ request.handle = map->handle;
+
+ DRM_OS_KRNTOUSR( (drm_ctx_priv_map_t *)data, request, sizeof(request) );
+
+ return 0;
+}
+
+int DRM(setsareactx)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_ctx_priv_map_t request;
+ drm_map_t *map = NULL;
+ drm_map_list_entry_t *list;
+
+ DRM_OS_KRNFROMUSR( request, (drm_ctx_priv_map_t *)data,
+ sizeof(request) );
+
+ DRM_OS_LOCK;
+ TAILQ_FOREACH(list, dev->maplist, link) {
+ map=list->map;
+ if(map->handle == request.handle)
+ goto found;
+ }
+
+bad:
+ DRM_OS_UNLOCK;
+	DRM_OS_RETURN(EINVAL);
+
+found:
+ map = list->map;
+ if (!map) goto bad;
+ if (dev->max_context < 0)
+ goto bad;
+ if (request.ctx_id >= (unsigned) dev->max_context)
+ goto bad;
+ dev->context_sareas[request.ctx_id] = map;
+ DRM_OS_UNLOCK;
+ return 0;
+}
+
+/* ================================================================
+ * The actual DRM context handling routines
+ */
+
+int DRM(context_switch)( drm_device_t *dev, int old, int new )
+{
+ char buf[64];
+
+ if ( test_and_set_bit( 0, &dev->context_flag ) ) {
+ DRM_ERROR( "Reentering -- FIXME\n" );
+ DRM_OS_RETURN(EBUSY);
+ }
+
+#if __HAVE_DMA_HISTOGRAM
+ dev->ctx_start = get_cycles();
+#endif
+
+ DRM_DEBUG( "Context switch from %d to %d\n", old, new );
+
+ if ( new == dev->last_context ) {
+ clear_bit( 0, &dev->context_flag );
+ return 0;
+ }
+
+ if ( DRM(flags) & DRM_FLAG_NOCTX ) {
+ DRM(context_switch_complete)( dev, new );
+ } else {
+ sprintf( buf, "C %d %d\n", old, new );
+ DRM(write_string)( dev, buf );
+ }
+
+ return 0;
+}
+
+int DRM(context_switch_complete)( drm_device_t *dev, int new )
+{
+ dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
+ dev->last_switch = jiffies;
+
+ if ( !_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ) {
+ DRM_ERROR( "Lock isn't held after context switch\n" );
+ }
+
+ /* If a context switch is ever initiated
+ when the kernel holds the lock, release
+ that lock here. */
+#if __HAVE_DMA_HISTOGRAM
+ atomic_inc( &dev->histo.ctx[DRM(histogram_slot)(get_cycles()
+ - dev->ctx_start)] );
+
+#endif
+ clear_bit( 0, &dev->context_flag );
+ DRM_OS_WAKEUP( &dev->context_wait );
+
+ return 0;
+}
+
+int DRM(resctx)( DRM_OS_IOCTL )
+{
+ drm_ctx_res_t res;
+ drm_ctx_t ctx;
+ int i;
+
+ DRM_OS_KRNFROMUSR( res, (drm_ctx_res_t *)data, sizeof(res) );
+
+ if ( res.count >= DRM_RESERVED_CONTEXTS ) {
+ memset( &ctx, 0, sizeof(ctx) );
+ for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
+ ctx.handle = i;
+ if ( DRM_OS_COPYTOUSR( &res.contexts[i],
+ &i, sizeof(i) ) )
+ DRM_OS_RETURN(EFAULT);
+ }
+ }
+ res.count = DRM_RESERVED_CONTEXTS;
+
+ DRM_OS_KRNTOUSR( (drm_ctx_res_t *)data, res, sizeof(res) );
+
+ return 0;
+}
+
+int DRM(addctx)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_ctx_t ctx;
+
+ DRM_OS_KRNFROMUSR( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+
+ ctx.handle = DRM(ctxbitmap_next)( dev );
+ if ( ctx.handle == DRM_KERNEL_CONTEXT ) {
+ /* Skip kernel's context and get a new one. */
+ ctx.handle = DRM(ctxbitmap_next)( dev );
+ }
+ DRM_DEBUG( "%d\n", ctx.handle );
+ if ( ctx.handle == -1 ) {
+ DRM_DEBUG( "Not enough free contexts.\n" );
+ /* Should this return -EBUSY instead? */
+ DRM_OS_RETURN(ENOMEM);
+ }
+
+ DRM_OS_KRNTOUSR( (drm_ctx_t *)data, ctx, sizeof(ctx) );
+
+ return 0;
+}
+
+int DRM(modctx)( DRM_OS_IOCTL )
+{
+ /* This does nothing */
+ return 0;
+}
+
+int DRM(getctx)( DRM_OS_IOCTL )
+{
+ drm_ctx_t ctx;
+
+ DRM_OS_KRNFROMUSR( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+
+ /* This is 0, because we don't handle any context flags */
+ ctx.flags = 0;
+
+ DRM_OS_KRNTOUSR( (drm_ctx_t *)data, ctx, sizeof(ctx) );
+
+ return 0;
+}
+
+int DRM(switchctx)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_ctx_t ctx;
+
+ DRM_OS_KRNFROMUSR( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+
+ DRM_DEBUG( "%d\n", ctx.handle );
+ return DRM(context_switch)( dev, dev->last_context, ctx.handle );
+}
+
+int DRM(newctx)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_ctx_t ctx;
+
+ DRM_OS_KRNFROMUSR( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+
+ DRM_DEBUG( "%d\n", ctx.handle );
+ DRM(context_switch_complete)( dev, ctx.handle );
+
+ return 0;
+}
+
+int DRM(rmctx)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_ctx_t ctx;
+
+ DRM_OS_KRNFROMUSR( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+
+ DRM_DEBUG( "%d\n", ctx.handle );
+ if ( ctx.handle != DRM_KERNEL_CONTEXT ) {
+ DRM(ctxbitmap_free)( dev, ctx.handle );
+ }
+
+ return 0;
+}
+
+
+#else /* __HAVE_CTX_BITMAP */
+
+/* ================================================================
+ * Old-style context support
+ */
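+/* Without a context bitmap, every context is backed by its own DMA
+ * queue: handles are indices into dev->queuelist, and a context switch
+ * passes DMA execution from one queue to another.
+ */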
+
+
+int DRM(context_switch)(drm_device_t *dev, int old, int new)
+{
+ char buf[64];
+ drm_queue_t *q;
+
+#if 0
+ atomic_inc(&dev->total_ctx);
+#endif
+
+ if (test_and_set_bit(0, &dev->context_flag)) {
+ DRM_ERROR("Reentering -- FIXME\n");
+ DRM_OS_RETURN(EBUSY);
+ }
+
+#if __HAVE_DMA_HISTOGRAM
+ dev->ctx_start = get_cycles();
+#endif
+
+ DRM_DEBUG("Context switch from %d to %d\n", old, new);
+
+ if (new >= dev->queue_count) {
+ clear_bit(0, &dev->context_flag);
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ if (new == dev->last_context) {
+ clear_bit(0, &dev->context_flag);
+ return 0;
+ }
+
+ q = dev->queuelist[new];
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->use_count) == 1) {
+ atomic_dec(&q->use_count);
+ clear_bit(0, &dev->context_flag);
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ if (DRM(flags) & DRM_FLAG_NOCTX) {
+ DRM(context_switch_complete)(dev, new);
+ } else {
+ sprintf(buf, "C %d %d\n", old, new);
+ DRM(write_string)(dev, buf);
+ }
+
+ atomic_dec(&q->use_count);
+
+ return 0;
+}
+
+int DRM(context_switch_complete)(drm_device_t *dev, int new)
+{
+ drm_device_dma_t *dma = dev->dma;
+
+ dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
+ dev->last_switch = jiffies;
+
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("Lock isn't held after context switch\n");
+ }
+
+ if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
+ if (DRM(lock_free)(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ DRM_ERROR("Cannot free lock\n");
+ }
+ }
+
+#if __HAVE_DMA_HISTOGRAM
+ atomic_inc(&dev->histo.ctx[DRM(histogram_slot)(get_cycles()
+ - dev->ctx_start)]);
+
+#endif
+ clear_bit(0, &dev->context_flag);
+ DRM_OS_WAKEUP_INT(&dev->context_wait);
+
+ return 0;
+}
+
+static int DRM(init_queue)(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx)
+{
+ DRM_DEBUG("\n");
+
+ if (atomic_read(&q->use_count) != 1
+ || atomic_read(&q->finalization)
+ || atomic_read(&q->block_count)) {
+ DRM_ERROR("New queue is already in use: u%ld f%ld b%ld\n",
+ (unsigned long)atomic_read(&q->use_count),
+ (unsigned long)atomic_read(&q->finalization),
+ (unsigned long)atomic_read(&q->block_count));
+ }
+
+ atomic_set(&q->finalization, 0);
+ atomic_set(&q->block_count, 0);
+ atomic_set(&q->block_read, 0);
+ atomic_set(&q->block_write, 0);
+ atomic_set(&q->total_queued, 0);
+ atomic_set(&q->total_flushed, 0);
+ atomic_set(&q->total_locks, 0);
+
+ q->write_queue = 0;
+ q->read_queue = 0;
+ q->flush_queue = 0;
+
+ q->flags = ctx->flags;
+
+ DRM(waitlist_create)(&q->waitlist, dev->dma->buf_count);
+
+ return 0;
+}
+
+
+/* drm_alloc_queue:
+PRE: 1) dev->queuelist[0..dev->queue_count] is allocated and will not
+ disappear (so all deallocation must be done after IOCTLs are off)
+ 2) dev->queue_count < dev->queue_slots
+ 3) dev->queuelist[i].use_count == 0 and
+ dev->queuelist[i].finalization == 0 if i not in use
+POST: 1) dev->queuelist[i].use_count == 1
+ 2) dev->queue_count < dev->queue_slots */
+
+static int DRM(alloc_queue)(drm_device_t *dev)
+{
+ int i;
+ drm_queue_t *queue;
+ int oldslots;
+ int newslots;
+ /* Check for a free queue */
+ for (i = 0; i < dev->queue_count; i++) {
+ atomic_inc(&dev->queuelist[i]->use_count);
+ if (atomic_read(&dev->queuelist[i]->use_count) == 1
+ && !atomic_read(&dev->queuelist[i]->finalization)) {
+ DRM_DEBUG("%d (free)\n", i);
+ return i;
+ }
+ atomic_dec(&dev->queuelist[i]->use_count);
+ }
+ /* Allocate a new queue */
+ DRM_OS_LOCK;
+
+	queue = DRM(alloc)(sizeof(*queue), DRM_MEM_QUEUES);
+ memset(queue, 0, sizeof(*queue));
+ atomic_set(&queue->use_count, 1);
+
+ ++dev->queue_count;
+ if (dev->queue_count >= dev->queue_slots) {
+ oldslots = dev->queue_slots * sizeof(*dev->queuelist);
+ if (!dev->queue_slots) dev->queue_slots = 1;
+ dev->queue_slots *= 2;
+ newslots = dev->queue_slots * sizeof(*dev->queuelist);
+
+ dev->queuelist = DRM(realloc)(dev->queuelist,
+ oldslots,
+ newslots,
+ DRM_MEM_QUEUES);
+ if (!dev->queuelist) {
+ DRM_OS_UNLOCK;
+ DRM_DEBUG("out of memory\n");
+ DRM_OS_RETURN(ENOMEM);
+ }
+ }
+ dev->queuelist[dev->queue_count-1] = queue;
+
+ DRM_OS_UNLOCK;
+ DRM_DEBUG("%d (new)\n", dev->queue_count - 1);
+ return dev->queue_count - 1;
+}
+
+int DRM(resctx)( DRM_OS_IOCTL )
+{
+ drm_ctx_res_t res;
+ drm_ctx_t ctx;
+ int i;
+
+ DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
+
+ DRM_OS_KRNFROMUSR( res, (drm_ctx_res_t *)data, sizeof(res) );
+
+ if (res.count >= DRM_RESERVED_CONTEXTS) {
+ memset(&ctx, 0, sizeof(ctx));
+ for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
+ ctx.handle = i;
+ if (DRM_OS_COPYTOUSR(&res.contexts[i],
+ &i,
+ sizeof(i)))
+ DRM_OS_RETURN(EFAULT);
+ }
+ }
+ res.count = DRM_RESERVED_CONTEXTS;
+
+ DRM_OS_KRNTOUSR( (drm_ctx_res_t *)data, res, sizeof(res) );
+
+ return 0;
+}
+
+int DRM(addctx)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_ctx_t ctx;
+
+ DRM_OS_KRNFROMUSR( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+
+ if ((ctx.handle = DRM(alloc_queue)(dev)) == DRM_KERNEL_CONTEXT) {
+ /* Init kernel's context and get a new one. */
+ DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
+ ctx.handle = DRM(alloc_queue)(dev);
+ }
+ DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
+ DRM_DEBUG("%d\n", ctx.handle);
+
+ DRM_OS_KRNTOUSR( (drm_ctx_t *)data, ctx, sizeof(ctx) );
+
+ return 0;
+}
+
+int DRM(modctx)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_ctx_t ctx;
+ drm_queue_t *q;
+
+ DRM_OS_KRNFROMUSR( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+
+ DRM_DEBUG("%d\n", ctx.handle);
+
+ if (ctx.handle < 0 || ctx.handle >= dev->queue_count)
+ DRM_OS_RETURN(EINVAL);
+ q = dev->queuelist[ctx.handle];
+
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->use_count) == 1) {
+ /* No longer in use */
+ atomic_dec(&q->use_count);
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ if (DRM_BUFCOUNT(&q->waitlist)) {
+ atomic_dec(&q->use_count);
+ DRM_OS_RETURN(EBUSY);
+ }
+
+ q->flags = ctx.flags;
+
+ atomic_dec(&q->use_count);
+ return 0;
+}
+
+int DRM(getctx)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_ctx_t ctx;
+ drm_queue_t *q;
+
+ DRM_OS_KRNFROMUSR( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+
+ DRM_DEBUG("%d\n", ctx.handle);
+
+ if (ctx.handle >= dev->queue_count)
+ DRM_OS_RETURN(EINVAL);
+ q = dev->queuelist[ctx.handle];
+
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->use_count) == 1) {
+ /* No longer in use */
+ atomic_dec(&q->use_count);
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ ctx.flags = q->flags;
+ atomic_dec(&q->use_count);
+
+ DRM_OS_KRNTOUSR( (drm_ctx_t *)data, ctx, sizeof(ctx) );
+
+ return 0;
+}
+
+int DRM(switchctx)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_ctx_t ctx;
+
+ DRM_OS_KRNFROMUSR( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+
+ DRM_DEBUG("%d\n", ctx.handle);
+ return DRM(context_switch)(dev, dev->last_context, ctx.handle);
+}
+
+int DRM(newctx)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_ctx_t ctx;
+
+ DRM_OS_KRNFROMUSR( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+
+ DRM_DEBUG("%d\n", ctx.handle);
+ DRM(context_switch_complete)(dev, ctx.handle);
+
+ return 0;
+}
+
+int DRM(rmctx)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_ctx_t ctx;
+ drm_queue_t *q;
+ drm_buf_t *buf;
+
+ DRM_OS_KRNFROMUSR( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+
+ DRM_DEBUG("%d\n", ctx.handle);
+
+ if (ctx.handle >= dev->queue_count) DRM_OS_RETURN(EINVAL);
+ q = dev->queuelist[ctx.handle];
+
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->use_count) == 1) {
+ /* No longer in use */
+ atomic_dec(&q->use_count);
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ atomic_inc(&q->finalization); /* Mark queue in finalization state */
+ atomic_sub(2, &q->use_count); /* Mark queue as unused (pending
+ finalization) */
+
+ while (test_and_set_bit(0, &dev->interrupt_flag)) {
+ static int never;
+ int retcode;
+ retcode = tsleep(&never, PZERO|PCATCH, "never", 1);
+ if (retcode)
+ return retcode;
+ }
+ /* Remove queued buffers */
+ while ((buf = DRM(waitlist_get)(&q->waitlist))) {
+ DRM(free_buffer)(dev, buf);
+ }
+ clear_bit(0, &dev->interrupt_flag);
+
+ /* Wakeup blocked processes */
+ wakeup( &q->block_read );
+ wakeup( &q->block_write );
+ DRM_OS_WAKEUP_INT( &q->flush_queue );
+ /* Finalization over. Queue is made
+ available when both use_count and
+ finalization become 0, which won't
+ happen until all the waiting processes
+ stop waiting. */
+ atomic_dec(&q->finalization);
+ return 0;
+}
+
+#endif /* __HAVE_CTX_BITMAP */
diff --git a/bsd-core/drm_dma.c b/bsd-core/drm_dma.c
new file mode 100644
index 00000000..e5aef241
--- /dev/null
+++ b/bsd-core/drm_dma.c
@@ -0,0 +1,605 @@
+/* drm_dma.c -- DMA IOCTL and function support -*- linux-c -*-
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/rman.h>
+
+#include "drmP.h"
+
+#ifndef __HAVE_DMA_WAITQUEUE
+#define __HAVE_DMA_WAITQUEUE 0
+#endif
+#ifndef __HAVE_DMA_RECLAIM
+#define __HAVE_DMA_RECLAIM 0
+#endif
+#ifndef __HAVE_SHARED_IRQ
+#define __HAVE_SHARED_IRQ 0
+#endif
+
+#if __HAVE_SHARED_IRQ
+#define DRM_IRQ_TYPE SA_SHIRQ
+#else
+#define DRM_IRQ_TYPE 0
+#endif
+
+#if __HAVE_DMA
+
+int DRM(dma_setup)( drm_device_t *dev )
+{
+ int i;
+
+ dev->dma = DRM(alloc)( sizeof(*dev->dma), DRM_MEM_DRIVER );
+ if ( !dev->dma )
+ DRM_OS_RETURN(ENOMEM);
+
+ memset( dev->dma, 0, sizeof(*dev->dma) );
+
+ for ( i = 0 ; i <= DRM_MAX_ORDER ; i++ )
+ memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
+
+ return 0;
+}
+
+void DRM(dma_takedown)(drm_device_t *dev)
+{
+ drm_device_dma_t *dma = dev->dma;
+ int i, j;
+
+ if (!dma) return;
+
+ /* Clear dma buffers */
+ for (i = 0; i <= DRM_MAX_ORDER; i++) {
+ if (dma->bufs[i].seg_count) {
+ DRM_DEBUG("order %d: buf_count = %d,"
+ " seg_count = %d\n",
+ i,
+ dma->bufs[i].buf_count,
+ dma->bufs[i].seg_count);
+ for (j = 0; j < dma->bufs[i].seg_count; j++) {
+ DRM(free_pages)(dma->bufs[i].seglist[j],
+ dma->bufs[i].page_order,
+ DRM_MEM_DMA);
+ }
+ DRM(free)(dma->bufs[i].seglist,
+ dma->bufs[i].seg_count
+ * sizeof(*dma->bufs[0].seglist),
+ DRM_MEM_SEGS);
+ }
+ if(dma->bufs[i].buf_count) {
+ for(j = 0; j < dma->bufs[i].buf_count; j++) {
+ if(dma->bufs[i].buflist[j].dev_private) {
+ DRM(free)(dma->bufs[i].buflist[j].dev_private,
+ dma->bufs[i].buflist[j].dev_priv_size,
+ DRM_MEM_BUFS);
+ }
+ }
+ DRM(free)(dma->bufs[i].buflist,
+ dma->bufs[i].buf_count *
+ sizeof(*dma->bufs[0].buflist),
+ DRM_MEM_BUFS);
+#if __HAVE_DMA_FREELIST
+ DRM(freelist_destroy)(&dma->bufs[i].freelist);
+#endif
+ }
+ }
+
+ if (dma->buflist) {
+ DRM(free)(dma->buflist,
+ dma->buf_count * sizeof(*dma->buflist),
+ DRM_MEM_BUFS);
+ }
+
+ if (dma->pagelist) {
+ DRM(free)(dma->pagelist,
+ dma->page_count * sizeof(*dma->pagelist),
+ DRM_MEM_PAGES);
+ }
+ DRM(free)(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
+ dev->dma = NULL;
+}
+
+
+#if __HAVE_DMA_HISTOGRAM
+/* This is slow, but is useful for debugging. */
+int DRM(histogram_slot)(unsigned long count)
+{
+ int value = DRM_DMA_HISTOGRAM_INITIAL;
+ int slot;
+
+ for (slot = 0;
+ slot < DRM_DMA_HISTOGRAM_SLOTS;
+ ++slot, value = DRM_DMA_HISTOGRAM_NEXT(value)) {
+ if (count < value) return slot;
+ }
+ return DRM_DMA_HISTOGRAM_SLOTS - 1;
+}
+
+void DRM(histogram_compute)(drm_device_t *dev, drm_buf_t *buf)
+{
+ cycles_t queued_to_dispatched;
+ cycles_t dispatched_to_completed;
+ cycles_t completed_to_freed;
+ int q2d, d2c, c2f, q2c, q2f;
+
+ if (buf->time_queued) {
+ queued_to_dispatched = (buf->time_dispatched
+ - buf->time_queued);
+ dispatched_to_completed = (buf->time_completed
+ - buf->time_dispatched);
+ completed_to_freed = (buf->time_freed
+ - buf->time_completed);
+
+ q2d = DRM(histogram_slot)(queued_to_dispatched);
+ d2c = DRM(histogram_slot)(dispatched_to_completed);
+ c2f = DRM(histogram_slot)(completed_to_freed);
+
+ q2c = DRM(histogram_slot)(queued_to_dispatched
+ + dispatched_to_completed);
+ q2f = DRM(histogram_slot)(queued_to_dispatched
+ + dispatched_to_completed
+ + completed_to_freed);
+
+ atomic_inc(&dev->histo.total);
+ atomic_inc(&dev->histo.queued_to_dispatched[q2d]);
+ atomic_inc(&dev->histo.dispatched_to_completed[d2c]);
+ atomic_inc(&dev->histo.completed_to_freed[c2f]);
+
+ atomic_inc(&dev->histo.queued_to_completed[q2c]);
+ atomic_inc(&dev->histo.queued_to_freed[q2f]);
+
+ }
+ buf->time_queued = 0;
+ buf->time_dispatched = 0;
+ buf->time_completed = 0;
+ buf->time_freed = 0;
+}
+#endif
+
+void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf)
+{
+ if (!buf) return;
+
+ buf->waiting = 0;
+ buf->pending = 0;
+ buf->pid = 0;
+ buf->used = 0;
+#if __HAVE_DMA_HISTOGRAM
+ buf->time_completed = get_cycles();
+#endif
+
+ if ( buf->dma_wait ) {
+ wakeup( &buf->dma_wait );
+ buf->dma_wait = 0;
+ }
+#if __HAVE_DMA_FREELIST
+ else {
+ drm_device_dma_t *dma = dev->dma;
+ /* If processes are waiting, the last one
+ to wake will put the buffer on the free
+ list. If no processes are waiting, we
+ put the buffer on the freelist here. */
+ DRM(freelist_put)(dev, &dma->bufs[buf->order].freelist, buf);
+ }
+#endif
+}
+
+#if !__HAVE_DMA_RECLAIM
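+/* Reclaim every buffer still owned by `pid'.  Buffers sitting on a wait
+ * list are marked DRM_LIST_RECLAIM; buffers already submitted to the
+ * hardware are left untouched.
+ */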
+void DRM(reclaim_buffers)(drm_device_t *dev, pid_t pid)
+{
+ drm_device_dma_t *dma = dev->dma;
+ int i;
+
+ if (!dma) return;
+ for (i = 0; i < dma->buf_count; i++) {
+ if (dma->buflist[i]->pid == pid) {
+ switch (dma->buflist[i]->list) {
+ case DRM_LIST_NONE:
+ DRM(free_buffer)(dev, dma->buflist[i]);
+ break;
+ case DRM_LIST_WAIT:
+ dma->buflist[i]->list = DRM_LIST_RECLAIM;
+ break;
+ default:
+ /* Buffer already on hardware. */
+ break;
+ }
+ }
+ }
+}
+#endif
+
+
+/* GH: This is a big hack for now...
+ */
+#if __HAVE_OLD_DMA
+
+void DRM(clear_next_buffer)(drm_device_t *dev)
+{
+ drm_device_dma_t *dma = dev->dma;
+
+ dma->next_buffer = NULL;
+ if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
+ DRM_OS_WAKEUP_INT(&dma->next_queue->flush_queue);
+ }
+ dma->next_queue = NULL;
+}
+
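+/* Pick the next context queue to service: "while locked" DMA on the
+ * kernel context wins, the current context keeps running for up to
+ * DRM_TIME_SLICE, and otherwise the queues are scanned round-robin
+ * starting after the last one checked.
+ */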
+int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long))
+{
+ int i;
+ int candidate = -1;
+ int j = jiffies;
+
+ if (!dev) {
+ DRM_ERROR("No device\n");
+ return -1;
+ }
+ if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
+ /* This only happens between the time the
+ interrupt is initialized and the time
+ the queues are initialized. */
+ return -1;
+ }
+
+ /* Doing "while locked" DMA? */
+ if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
+ return DRM_KERNEL_CONTEXT;
+ }
+
+ /* If there are buffers on the last_context
+ queue, and we have not been executing
+ this context very long, continue to
+ execute this context. */
+ if (dev->last_switch <= j
+ && dev->last_switch + DRM_TIME_SLICE > j
+ && DRM_WAITCOUNT(dev, dev->last_context)) {
+ return dev->last_context;
+ }
+
+ /* Otherwise, find a candidate */
+ for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
+ if (DRM_WAITCOUNT(dev, i)) {
+ candidate = dev->last_checked = i;
+ break;
+ }
+ }
+
+ if (candidate < 0) {
+ for (i = 0; i < dev->queue_count; i++) {
+ if (DRM_WAITCOUNT(dev, i)) {
+ candidate = dev->last_checked = i;
+ break;
+ }
+ }
+ }
+
+ if (wrapper
+ && candidate >= 0
+ && candidate != dev->last_context
+ && dev->last_switch <= j
+ && dev->last_switch + DRM_TIME_SLICE > j) {
+ int s = splclock();
+ if (dev->timer.c_time != dev->last_switch + DRM_TIME_SLICE) {
+ callout_reset(&dev->timer,
+ dev->last_switch + DRM_TIME_SLICE - j,
+ (void (*)(void *))wrapper,
+ dev);
+ }
+ splx(s);
+ return -1;
+ }
+
+ return candidate;
+}
+
+
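+/* Queue the buffers named in a drm_dma_t on the target context's wait
+ * list, validating ownership and honouring "while locked" submissions
+ * (which must hold the hardware lock and go to the kernel queue).
+ */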
+int DRM(dma_enqueue)(drm_device_t *dev, drm_dma_t *d)
+{
+ int i;
+ drm_queue_t *q;
+ drm_buf_t *buf;
+ int idx;
+ int while_locked = 0;
+ drm_device_dma_t *dma = dev->dma;
+ int error;
+
+ DRM_DEBUG("%d\n", d->send_count);
+
+ if (d->flags & _DRM_DMA_WHILE_LOCKED) {
+ int context = dev->lock.hw_lock->lock;
+
+ if (!_DRM_LOCK_IS_HELD(context)) {
+ DRM_ERROR("No lock held during \"while locked\""
+ " request\n");
+ DRM_OS_RETURN(EINVAL);
+ }
+ if (d->context != _DRM_LOCKING_CONTEXT(context)
+ && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Lock held by %d while %d makes"
+ " \"while locked\" request\n",
+ _DRM_LOCKING_CONTEXT(context),
+ d->context);
+ DRM_OS_RETURN(EINVAL);
+ }
+ q = dev->queuelist[DRM_KERNEL_CONTEXT];
+ while_locked = 1;
+ } else {
+ q = dev->queuelist[d->context];
+ }
+
+
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->block_write)) {
+ atomic_inc(&q->block_count);
+ for (;;) {
+ if (!atomic_read(&q->block_write)) break;
+ error = tsleep(&q->block_write, PZERO|PCATCH,
+ "dmawr", 0);
+ if (error) {
+ atomic_dec(&q->use_count);
+ return error;
+ }
+ }
+ atomic_dec(&q->block_count);
+ }
+
+ for (i = 0; i < d->send_count; i++) {
+ idx = d->send_indices[i];
+ if (idx < 0 || idx >= dma->buf_count) {
+ atomic_dec(&q->use_count);
+ DRM_ERROR("Index %d (of %d max)\n",
+ d->send_indices[i], dma->buf_count - 1);
+ DRM_OS_RETURN(EINVAL);
+ }
+ buf = dma->buflist[ idx ];
+ if (buf->pid != DRM_OS_CURRENTPID) {
+ atomic_dec(&q->use_count);
+ DRM_ERROR("Process %d using buffer owned by %d\n",
+ DRM_OS_CURRENTPID, buf->pid);
+ DRM_OS_RETURN(EINVAL);
+ }
+ if (buf->list != DRM_LIST_NONE) {
+ atomic_dec(&q->use_count);
+ DRM_ERROR("Process %d using buffer %d on list %d\n",
+ DRM_OS_CURRENTPID, buf->idx, buf->list);
+ }
+ buf->used = d->send_sizes[i];
+ buf->while_locked = while_locked;
+ buf->context = d->context;
+ if (!buf->used) {
+ DRM_ERROR("Queueing 0 length buffer\n");
+ }
+ if (buf->pending) {
+ atomic_dec(&q->use_count);
+ DRM_ERROR("Queueing pending buffer:"
+ " buffer %d, offset %d\n",
+ d->send_indices[i], i);
+ DRM_OS_RETURN(EINVAL);
+ }
+ if (buf->waiting) {
+ atomic_dec(&q->use_count);
+ DRM_ERROR("Queueing waiting buffer:"
+ " buffer %d, offset %d\n",
+ d->send_indices[i], i);
+ DRM_OS_RETURN(EINVAL);
+ }
+ buf->waiting = 1;
+ if (atomic_read(&q->use_count) == 1
+ || atomic_read(&q->finalization)) {
+ DRM(free_buffer)(dev, buf);
+ } else {
+ DRM(waitlist_put)(&q->waitlist, buf);
+ atomic_inc(&q->total_queued);
+ }
+ }
+ atomic_dec(&q->use_count);
+
+ return 0;
+}
+
+static int DRM(dma_get_buffers_of_order)(drm_device_t *dev, drm_dma_t *d,
+ int order)
+{
+ int i;
+ drm_buf_t *buf;
+ drm_device_dma_t *dma = dev->dma;
+
+ for (i = d->granted_count; i < d->request_count; i++) {
+ buf = DRM(freelist_get)(&dma->bufs[order].freelist,
+ d->flags & _DRM_DMA_WAIT);
+ if (!buf) break;
+ if (buf->pending || buf->waiting) {
+ DRM_ERROR("Free buffer %d in use by %d (w%d, p%d)\n",
+ buf->idx,
+ buf->pid,
+ buf->waiting,
+ buf->pending);
+ }
+ buf->pid = DRM_OS_CURRENTPID;
+ if (DRM_OS_COPYTOUSR(&d->request_indices[i],
+ &buf->idx,
+ sizeof(buf->idx)))
+ DRM_OS_RETURN(EFAULT);
+
+ if (DRM_OS_COPYTOUSR(&d->request_sizes[i],
+ &buf->total,
+ sizeof(buf->total)))
+ DRM_OS_RETURN(EFAULT);
+
+ ++d->granted_count;
+ }
+ return 0;
+}
+
+
+int DRM(dma_get_buffers)(drm_device_t *dev, drm_dma_t *dma)
+{
+ int order;
+ int retcode = 0;
+ int tmp_order;
+
+ order = DRM(order)(dma->request_size);
+
+ dma->granted_count = 0;
+ retcode = DRM(dma_get_buffers_of_order)(dev, dma, order);
+
+ if (dma->granted_count < dma->request_count
+ && (dma->flags & _DRM_DMA_SMALLER_OK)) {
+ for (tmp_order = order - 1;
+ !retcode
+ && dma->granted_count < dma->request_count
+ && tmp_order >= DRM_MIN_ORDER;
+ --tmp_order) {
+
+ retcode = DRM(dma_get_buffers_of_order)(dev, dma,
+ tmp_order);
+ }
+ }
+
+ if (dma->granted_count < dma->request_count
+ && (dma->flags & _DRM_DMA_LARGER_OK)) {
+ for (tmp_order = order + 1;
+ !retcode
+ && dma->granted_count < dma->request_count
+ && tmp_order <= DRM_MAX_ORDER;
+ ++tmp_order) {
+
+ retcode = DRM(dma_get_buffers_of_order)(dev, dma,
+ tmp_order);
+ }
+ }
+ return 0;
+}
+
+#endif /* __HAVE_OLD_DMA */
+
+
+#if __HAVE_DMA_IRQ
+
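+/* Hook up the device interrupt: allocate the IRQ resource, install
+ * DRM(dma_service) as the handler, and run the driver's pre/post
+ * install hooks.
+ */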
+int DRM(irq_install)( drm_device_t *dev, int irq )
+{
+ int rid;
+ int retcode;
+
+ if ( !irq )
+ DRM_OS_RETURN(EINVAL);
+
+ DRM_OS_LOCK;
+ if ( dev->irq ) {
+ DRM_OS_UNLOCK;
+ DRM_OS_RETURN(EBUSY);
+ }
+ dev->irq = irq;
+ DRM_OS_UNLOCK;
+
+ DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, irq );
+
+ dev->context_flag = 0;
+ dev->interrupt_flag = 0;
+ dev->dma_flag = 0;
+
+ dev->dma->next_buffer = NULL;
+ dev->dma->next_queue = NULL;
+ dev->dma->this_buffer = NULL;
+
+#if __HAVE_DMA_IRQ_BH
+ TASK_INIT(&dev->task, 0, DRM(dma_immediate_bh), dev);
+#endif
+
+ /* Before installing handler */
+ DRIVER_PREINSTALL();
+
+ /* Install handler */
+ rid = 0;
+ dev->irqr = bus_alloc_resource(dev->device, SYS_RES_IRQ, &rid,
+ 0, ~0, 1, RF_SHAREABLE);
+ if (!dev->irqr)
+ return ENOENT;
+
+ retcode = bus_setup_intr(dev->device, dev->irqr, INTR_TYPE_TTY,
+ DRM(dma_service), dev, &dev->irqh);
+ if ( retcode ) {
+ DRM_OS_LOCK;
+ bus_release_resource(dev->device, SYS_RES_IRQ, 0, dev->irqr);
+ dev->irq = 0;
+ DRM_OS_UNLOCK;
+ return retcode;
+ }
+
+ /* After installing handler */
+ DRIVER_POSTINSTALL();
+
+ return 0;
+}
+
+int DRM(irq_uninstall)( drm_device_t *dev )
+{
+ int irq;
+
+ DRM_OS_LOCK;
+ irq = dev->irq;
+ dev->irq = 0;
+ DRM_OS_UNLOCK;
+
+ if ( !irq )
+ DRM_OS_RETURN(EINVAL);
+
+ DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, irq );
+
+ DRIVER_UNINSTALL();
+
+ bus_teardown_intr(dev->device, dev->irqr, dev->irqh);
+ bus_release_resource(dev->device, SYS_RES_IRQ, 0, dev->irqr);
+
+ return 0;
+}
+
+int DRM(control)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_control_t ctl;
+
+ DRM_OS_KRNFROMUSR( ctl, (drm_control_t *) data, sizeof(ctl) );
+
+ switch ( ctl.func ) {
+ case DRM_INST_HANDLER:
+ return DRM(irq_install)( dev, ctl.irq );
+ case DRM_UNINST_HANDLER:
+ return DRM(irq_uninstall)( dev );
+ default:
+ DRM_OS_RETURN(EINVAL);
+ }
+}
+
+#endif /* __HAVE_DMA_IRQ */
+
+#endif /* __HAVE_DMA */
diff --git a/bsd-core/drm_drawable.c b/bsd-core/drm_drawable.c
new file mode 100644
index 00000000..f57d8628
--- /dev/null
+++ b/bsd-core/drm_drawable.c
@@ -0,0 +1,50 @@
+/* drm_drawable.h -- IOCTLs for drawables -*- linux-c -*-
+ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+int DRM(adddraw)( DRM_OS_IOCTL )
+{
+ drm_draw_t draw;
+
+ draw.handle = 0; /* NOOP */
+ DRM_DEBUG("%d\n", draw.handle);
+
+ DRM_OS_KRNTOUSR( (drm_draw_t *)data, draw, sizeof(draw) );
+
+ return 0;
+}
+
+int DRM(rmdraw)( DRM_OS_IOCTL )
+{
+ return 0; /* NOOP */
+}
diff --git a/bsd-core/drm_drv.c b/bsd-core/drm_drv.c
new file mode 100644
index 00000000..4e5d76fb
--- /dev/null
+++ b/bsd-core/drm_drv.c
@@ -0,0 +1,1160 @@
+/* drm_drv.h -- Generic driver template -*- linux-c -*-
+ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * To use this template, you must at least define the following (samples
+ * given for the MGA driver):
+ *
+ * #define DRIVER_AUTHOR "VA Linux Systems, Inc."
+ *
+ * #define DRIVER_NAME "mga"
+ * #define DRIVER_DESC "Matrox G200/G400"
+ * #define DRIVER_DATE "20001127"
+ *
+ * #define DRIVER_MAJOR 2
+ * #define DRIVER_MINOR 0
+ * #define DRIVER_PATCHLEVEL 2
+ *
+ * #define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( mga_ioctls )
+ *
+ * #define DRM(x) mga_##x
+ */
+
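+/* DRM(x) is the name-mangling hook: with "#define DRM(x) mga_##x" every
+ * symbol defined by this shared template gets a per-driver prefix
+ * (mga_open, mga_ioctl, ...), so several drivers built from the same
+ * template can be loaded into one kernel without clashing.
+ */
+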
+#ifndef __MUST_HAVE_AGP
+#define __MUST_HAVE_AGP 0
+#endif
+#ifndef __HAVE_CTX_BITMAP
+#define __HAVE_CTX_BITMAP 0
+#endif
+#ifndef __HAVE_DMA_IRQ
+#define __HAVE_DMA_IRQ 0
+#endif
+#ifndef __HAVE_DMA_QUEUE
+#define __HAVE_DMA_QUEUE 0
+#endif
+#ifndef __HAVE_MULTIPLE_DMA_QUEUES
+#define __HAVE_MULTIPLE_DMA_QUEUES 0
+#endif
+#ifndef __HAVE_DMA_SCHEDULE
+#define __HAVE_DMA_SCHEDULE 0
+#endif
+#ifndef __HAVE_DMA_FLUSH
+#define __HAVE_DMA_FLUSH 0
+#endif
+#ifndef __HAVE_DMA_READY
+#define __HAVE_DMA_READY 0
+#endif
+#ifndef __HAVE_DMA_QUIESCENT
+#define __HAVE_DMA_QUIESCENT 0
+#endif
+#ifndef __HAVE_RELEASE
+#define __HAVE_RELEASE 0
+#endif
+#ifndef __HAVE_COUNTERS
+#define __HAVE_COUNTERS 0
+#endif
+#ifndef __HAVE_SG
+#define __HAVE_SG 0
+#endif
+#ifndef __HAVE_KERNEL_CTX_SWITCH
+#define __HAVE_KERNEL_CTX_SWITCH 0
+#endif
+#ifndef PCI_ANY_ID
+#define PCI_ANY_ID ~0
+#endif
+
+#ifndef DRIVER_PREINIT
+#define DRIVER_PREINIT()
+#endif
+#ifndef DRIVER_POSTINIT
+#define DRIVER_POSTINIT()
+#endif
+#ifndef DRIVER_PRERELEASE
+#define DRIVER_PRERELEASE()
+#endif
+#ifndef DRIVER_PRETAKEDOWN
+#define DRIVER_PRETAKEDOWN()
+#endif
+#ifndef DRIVER_POSTCLEANUP
+#define DRIVER_POSTCLEANUP()
+#endif
+#ifndef DRIVER_PRESETUP
+#define DRIVER_PRESETUP()
+#endif
+#ifndef DRIVER_POSTSETUP
+#define DRIVER_POSTSETUP()
+#endif
+#ifndef DRIVER_IOCTLS
+#define DRIVER_IOCTLS
+#endif
+#ifndef DRIVER_FOPS
+#if DRM_LINUX
+#include <sys/file.h>
+#include <sys/proc.h>
+#include <machine/../linux/linux.h>
+#include <machine/../linux/linux_proto.h>
+#include "drm_linux.h"
+#endif
+#endif
+
+
+/*
+ * The default number of instances (minor numbers) to initialize.
+ */
+#ifndef DRIVER_NUM_CARDS
+#define DRIVER_NUM_CARDS 1
+#endif
+
+static int DRM(init)(device_t nbdev);
+static void DRM(cleanup)(device_t nbdev);
+
+#define CDEV_MAJOR 145
+#define DRIVER_SOFTC(unit) \
+ ((drm_device_t *) devclass_get_softc(DRM(devclass), unit))
+
+#if __REALLY_HAVE_AGP
+MODULE_DEPEND(DRIVER_NAME, agp, 1, 1, 1);
+#endif
+#if DRM_LINUX
+MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);
+#endif
+
+static drm_device_t *DRM(device);
+static int *DRM(minor);
+static int DRM(numdevs) = 0;
+
+
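+/* Each descriptor below is { handler, auth_needed, root_only }: entries with
+ * auth_needed set are refused until the caller has been authenticated (see
+ * DRM(authmagic)), and root_only entries additionally require superuser
+ * privileges (DRM_OS_CHECKSUSER); the checks live in DRM(ioctl).
+ */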
+static drm_ioctl_desc_t DRM(ioctls)[] = {
+ [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { DRM(version), 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { DRM(getunique), 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { DRM(getmagic), 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { DRM(irq_busid), 0, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = { DRM(getmap), 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = { DRM(getclient), 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = { DRM(getstats), 0, 0 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { DRM(setunique), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { DRM(block), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { DRM(unblock), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { DRM(authmagic), 1, 1 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { DRM(addmap), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = { DRM(rmmap), 1, 0 },
+
+#if __HAVE_CTX_BITMAP
+ [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { DRM(setsareactx), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { DRM(getsareactx), 1, 0 },
+#endif
+
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { DRM(addctx), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { DRM(rmctx), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { DRM(modctx), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { DRM(getctx), 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { DRM(switchctx), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { DRM(newctx), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { DRM(resctx), 1, 0 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { DRM(adddraw), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { DRM(rmdraw), 1, 1 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { DRM(lock), 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { DRM(unlock), 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(finish), 1, 0 },
+
+#if __HAVE_DMA
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { DRM(addbufs), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { DRM(markbufs), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { DRM(infobufs), 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { DRM(mapbufs), 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { DRM(freebufs), 1, 0 },
+
+ /* The DRM_IOCTL_DMA ioctl should be defined by the driver.
+ */
+#if __HAVE_DMA_IRQ
+ [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { DRM(control), 1, 1 },
+#endif
+#endif
+
+#if __REALLY_HAVE_AGP
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { DRM(agp_acquire), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { DRM(agp_release), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { DRM(agp_enable), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { DRM(agp_info), 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { DRM(agp_alloc), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { DRM(agp_free), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { DRM(agp_bind), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { DRM(agp_unbind), 1, 1 },
+#endif
+
+#if __REALLY_HAVE_SG
+ [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = { DRM(sg_alloc), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = { DRM(sg_free), 1, 1 },
+#endif
+
+ DRIVER_IOCTLS
+};
+
+#define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( DRM(ioctls) )
+
+
+static int DRM(probe)(device_t dev)
+{
+ const char *s = 0;
+
+ int pciid=pci_get_devid(dev);
+ int vendor = (pciid & 0x0000ffff);
+ int device = (pciid & 0xffff0000) >> 16;
+ int i=0, done=0;
+ DRM_INFO("Checking PCI vendor=%d, device=%d\n", vendor, device);
+ while ( !done && (DRM(devicelist)[i].vendor != 0 ) ) {
+ if ( (DRM(devicelist)[i].vendor == vendor) &&
+ (DRM(devicelist)[i].device == device) ) {
+ done=1;
+ if ( DRM(devicelist)[i].supported )
+ s = DRM(devicelist)[i].name;
+ else
+ DRM_INFO("%s not supported\n", DRM(devicelist)[i].name);
+ }
+ i++;
+ }
+
+ if (s) {
+ device_set_desc(dev, s);
+ return 0;
+ }
+
+ return ENXIO;
+}
+
+static int DRM(attach)(device_t dev)
+{
+ return DRM(init)(dev);
+}
+
+static int DRM(detach)(device_t dev)
+{
+ DRM(cleanup)(dev);
+ return 0;
+}
+
+static device_method_t DRM(methods)[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, DRM( probe)),
+ DEVMETHOD(device_attach, DRM( attach)),
+ DEVMETHOD(device_detach, DRM( detach)),
+
+ { 0, 0 }
+};
+
+static driver_t DRM(driver) = {
+ "drm",
+ DRM(methods),
+ sizeof(drm_device_t),
+};
+
+static devclass_t DRM( devclass);
+
+static struct cdevsw DRM( cdevsw) = {
+ /* open */ DRM( open ),
+ /* close */ DRM( close ),
+ /* read */ DRM( read ),
+ /* write */ DRM( write ),
+ /* ioctl */ DRM( ioctl ),
+ /* poll */ DRM( poll ),
+ /* mmap */ DRM( mmap ),
+ /* strategy */ nostrategy,
+ /* name */ DRIVER_NAME,
+ /* maj */ CDEV_MAJOR,
+ /* dump */ nodump,
+ /* psize */ nopsize,
+ /* flags */ D_TTY | D_TRACKCLOSE,
+#if __FreeBSD_version >= 500000
+ /* kqfilter */ 0
+#else
+ /* bmaj */ -1
+#endif
+};
+
+static int DRM(setup)( drm_device_t *dev )
+{
+ int i;
+
+ DRIVER_PRESETUP();
+ atomic_set( &dev->ioctl_count, 0 );
+ atomic_set( &dev->vma_count, 0 );
+ dev->buf_use = 0;
+ atomic_set( &dev->buf_alloc, 0 );
+
+#if __HAVE_DMA
+ i = DRM(dma_setup)( dev );
+ if ( i < 0 )
+ return i;
+#endif
+
+ dev->counters = 6 + __HAVE_COUNTERS;
+ dev->types[0] = _DRM_STAT_LOCK;
+ dev->types[1] = _DRM_STAT_OPENS;
+ dev->types[2] = _DRM_STAT_CLOSES;
+ dev->types[3] = _DRM_STAT_IOCTLS;
+ dev->types[4] = _DRM_STAT_LOCKS;
+ dev->types[5] = _DRM_STAT_UNLOCKS;
+#ifdef __HAVE_COUNTER6
+ dev->types[6] = __HAVE_COUNTER6;
+#endif
+#ifdef __HAVE_COUNTER7
+ dev->types[7] = __HAVE_COUNTER7;
+#endif
+#ifdef __HAVE_COUNTER8
+ dev->types[8] = __HAVE_COUNTER8;
+#endif
+#ifdef __HAVE_COUNTER9
+ dev->types[9] = __HAVE_COUNTER9;
+#endif
+#ifdef __HAVE_COUNTER10
+ dev->types[10] = __HAVE_COUNTER10;
+#endif
+#ifdef __HAVE_COUNTER11
+ dev->types[11] = __HAVE_COUNTER11;
+#endif
+#ifdef __HAVE_COUNTER12
+ dev->types[12] = __HAVE_COUNTER12;
+#endif
+#ifdef __HAVE_COUNTER13
+ dev->types[13] = __HAVE_COUNTER13;
+#endif
+#ifdef __HAVE_COUNTER14
+ dev->types[14] = __HAVE_COUNTER14;
+#endif
+#ifdef __HAVE_COUNTER15
+	dev->types[15] = __HAVE_COUNTER15;
+#endif
+
+ for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ )
+ atomic_set( &dev->counts[i], 0 );
+
+ for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
+ dev->magiclist[i].head = NULL;
+ dev->magiclist[i].tail = NULL;
+ }
+
+ dev->maplist = DRM(alloc)(sizeof(*dev->maplist),
+ DRM_MEM_MAPS);
+ if(dev->maplist == NULL) DRM_OS_RETURN(ENOMEM);
+ memset(dev->maplist, 0, sizeof(*dev->maplist));
+ TAILQ_INIT(dev->maplist);
+ dev->map_count = 0;
+
+ dev->vmalist = NULL;
+ dev->lock.hw_lock = NULL;
+ dev->lock.lock_queue = 0;
+ dev->queue_count = 0;
+ dev->queue_reserved = 0;
+ dev->queue_slots = 0;
+ dev->queuelist = NULL;
+ dev->irq = 0;
+ dev->context_flag = 0;
+ dev->interrupt_flag = 0;
+ dev->dma_flag = 0;
+ dev->last_context = 0;
+ dev->last_switch = 0;
+ dev->last_checked = 0;
+#if __FreeBSD_version >= 500000
+ callout_init( &dev->timer, 1 );
+#else
+ callout_init( &dev->timer );
+#endif
+ dev->context_wait = 0;
+
+ dev->ctx_start = 0;
+ dev->lck_start = 0;
+
+ dev->buf_rp = dev->buf;
+ dev->buf_wp = dev->buf;
+ dev->buf_end = dev->buf + DRM_BSZ;
+ dev->buf_sigio = NULL;
+ dev->buf_readers = 0;
+ dev->buf_writers = 0;
+ dev->buf_selecting = 0;
+
+ DRM_DEBUG( "\n" );
+
+ /* The kernel's context could be created here, but is now created
+ * in drm_dma_enqueue. This is more resource-efficient for
+ * hardware that does not do DMA, but may mean that
+ * drm_select_queue fails between the time the interrupt is
+ * initialized and the time the queues are initialized.
+ */
+ DRIVER_POSTSETUP();
+ return 0;
+}
+
+
+static int DRM(takedown)( drm_device_t *dev )
+{
+ drm_magic_entry_t *pt, *next;
+ drm_map_t *map;
+ drm_map_list_entry_t *list;
+ drm_vma_entry_t *vma, *vma_next;
+ int i;
+
+ DRM_DEBUG( "\n" );
+
+ DRIVER_PRETAKEDOWN();
+#if __HAVE_DMA_IRQ
+ if ( dev->irq ) DRM(irq_uninstall)( dev );
+#endif
+
+ DRM_OS_LOCK;
+ callout_stop( &dev->timer );
+
+ if ( dev->devname ) {
+ DRM(free)( dev->devname, strlen( dev->devname ) + 1,
+ DRM_MEM_DRIVER );
+ dev->devname = NULL;
+ }
+
+ if ( dev->unique ) {
+ DRM(free)( dev->unique, strlen( dev->unique ) + 1,
+ DRM_MEM_DRIVER );
+ dev->unique = NULL;
+ dev->unique_len = 0;
+ }
+ /* Clear pid list */
+ for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
+ for ( pt = dev->magiclist[i].head ; pt ; pt = next ) {
+ next = pt->next;
+ DRM(free)( pt, sizeof(*pt), DRM_MEM_MAGIC );
+ }
+ dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
+ }
+
+#if __REALLY_HAVE_AGP
+ /* Clear AGP information */
+ if ( dev->agp ) {
+ drm_agp_mem_t *entry;
+ drm_agp_mem_t *nexte;
+
+ /* Remove AGP resources, but leave dev->agp
+ intact until drv_cleanup is called. */
+ for ( entry = dev->agp->memory ; entry ; entry = nexte ) {
+ nexte = entry->next;
+ if ( entry->bound ) DRM(unbind_agp)( entry->handle );
+ DRM(free_agp)( entry->handle, entry->pages );
+ DRM(free)( entry, sizeof(*entry), DRM_MEM_AGPLISTS );
+ }
+ dev->agp->memory = NULL;
+
+ if ( dev->agp->acquired ) DRM(agp_do_release)();
+
+ dev->agp->acquired = 0;
+ dev->agp->enabled = 0;
+ }
+#endif
+
+ /* Clear vma list (only built for debugging) */
+ if ( dev->vmalist ) {
+ for ( vma = dev->vmalist ; vma ; vma = vma_next ) {
+ vma_next = vma->next;
+ DRM(free)( vma, sizeof(*vma), DRM_MEM_VMAS );
+ }
+ dev->vmalist = NULL;
+ }
+
+ if( dev->maplist ) {
+ while ((list=TAILQ_FIRST(dev->maplist))) {
+ map = list->map;
+ switch ( map->type ) {
+ case _DRM_REGISTERS:
+ case _DRM_FRAME_BUFFER:
+#if __REALLY_HAVE_MTRR
+ if ( map->mtrr >= 0 ) {
+ int retcode;
+ retcode = mtrr_del( map->mtrr,
+ map->offset,
+ map->size );
+ DRM_DEBUG( "mtrr_del=%d\n", retcode );
+ }
+#endif
+ DRM(ioremapfree)( map->handle, map->size );
+ break;
+ case _DRM_SHM:
+ DRM(free_pages)((unsigned long)map->handle,
+ DRM(order)(map->size)
+ - PAGE_SHIFT,
+ DRM_MEM_SAREA);
+ break;
+
+ case _DRM_AGP:
+ /* Do nothing here, because this is all
+ * handled in the AGP/GART driver.
+ */
+ break;
+ case _DRM_SCATTER_GATHER:
+ /* Handle it, but do nothing, if REALLY_HAVE_SG
+ * isn't defined.
+ */
+#if __REALLY_HAVE_SG
+ if(dev->sg) {
+ DRM(sg_cleanup)(dev->sg);
+ dev->sg = NULL;
+ }
+#endif
+ break;
+ }
+ TAILQ_REMOVE(dev->maplist, list, link);
+ DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);
+ DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
+ }
+ DRM(free)(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
+ dev->maplist = NULL;
+ }
+
+#if __HAVE_DMA_QUEUE || __HAVE_MULTIPLE_DMA_QUEUES
+ if ( dev->queuelist ) {
+ for ( i = 0 ; i < dev->queue_count ; i++ ) {
+ DRM(waitlist_destroy)( &dev->queuelist[i]->waitlist );
+ if ( dev->queuelist[i] ) {
+ DRM(free)( dev->queuelist[i],
+ sizeof(*dev->queuelist[0]),
+ DRM_MEM_QUEUES );
+ dev->queuelist[i] = NULL;
+ }
+ }
+ DRM(free)( dev->queuelist,
+ dev->queue_slots * sizeof(*dev->queuelist),
+ DRM_MEM_QUEUES );
+ dev->queuelist = NULL;
+ }
+ dev->queue_count = 0;
+#endif
+
+#if __HAVE_DMA
+ DRM(dma_takedown)( dev );
+#endif
+ if ( dev->lock.hw_lock ) {
+ dev->lock.hw_lock = NULL; /* SHM removed */
+ dev->lock.pid = 0;
+ DRM_OS_WAKEUP_INT(&dev->lock.lock_queue);
+ }
+ DRM_OS_UNLOCK;
+
+ return 0;
+}
+
+/*
+ * Figure out how many instances to initialize.
+ */
+static int drm_count_cards(void)
+{
+ int num = 0;
+#if defined(DRIVER_CARD_LIST)
+ int i;
+ drm_pci_list_t *l;
+ u16 device, vendor;
+ struct pci_dev *pdev = NULL;
+#endif
+
+ DRM_DEBUG( "\n" );
+
+#if defined(DRIVER_COUNT_CARDS)
+ num = DRIVER_COUNT_CARDS();
+#elif defined(DRIVER_CARD_LIST)
+ for (i = 0, l = DRIVER_CARD_LIST; l[i].vendor != 0; i++) {
+ pdev = NULL;
+ vendor = l[i].vendor;
+ device = l[i].device;
+ if(device == 0xffff) device = PCI_ANY_ID;
+ if(vendor == 0xffff) vendor = PCI_ANY_ID;
+ while ((pdev = pci_find_device(vendor, device, pdev))) {
+ num++; /* FIXME: What about two cards of the same device id? */
+ }
+ }
+#else
+ num = DRIVER_NUM_CARDS;
+#endif
+ DRM_DEBUG("numdevs = %d\n", num);
+ return num;
+}
+
+/* DRM(init) is called from DRM(attach) when the newbus framework attaches
+ * the driver to a matching device.
+ */
+static int DRM(init)( device_t nbdev )
+{
+
+ drm_device_t *dev;
+ int i;
+#if __HAVE_CTX_BITMAP
+ int retcode;
+#endif
+ DRM_DEBUG( "\n" );
+
+#ifdef MODULE
+ DRM(parse_options)( drm_opts );
+#endif
+
+ DRM(numdevs) = drm_count_cards();
+ /* Force at least one instance. */
+ if (DRM(numdevs) <= 0)
+ DRM(numdevs) = 1;
+
+ DRM(device) = DRM_OS_MALLOC(sizeof(*DRM(device)) * DRM(numdevs));
+ if (!DRM(device)) {
+ DRM_OS_RETURN(ENOMEM);
+ }
+ DRM(minor) = DRM_OS_MALLOC(sizeof(*(DRM(minor))) * DRM(numdevs));
+ if (!DRM(minor)) {
+ DRM_OS_FREE(DRM(device));
+ DRM_OS_RETURN(ENOMEM);
+ }
+
+ DRIVER_PREINIT();
+
+
+ for (i = 0; i < DRM(numdevs); i++) {
+ int unit = device_get_unit(nbdev);
+ /* FIXME??? - multihead !!! */
+ dev = device_get_softc(nbdev);
+ memset( (void *)dev, 0, sizeof(*dev) );
+ DRM(minor)[i]=unit;
+ DRM_OS_SPININIT(dev->count_lock, "drm device");
+ lockinit(&dev->dev_lock, PZERO, "drmlk", 0, 0);
+ dev->device = nbdev;
+ dev->devnode = make_dev( &DRM(cdevsw),
+ unit,
+ DRM_DEV_UID,
+ DRM_DEV_GID,
+ DRM_DEV_MODE,
+ "dri/card%d", unit );
+ dev->name = DRIVER_NAME;
+ DRM(mem_init)();
+ DRM(sysctl_init)(dev);
+ TAILQ_INIT(&dev->files);
+
+#if __REALLY_HAVE_AGP
+ dev->agp = DRM(agp_init)();
+#if __MUST_HAVE_AGP
+ if ( dev->agp == NULL ) {
+ DRM_ERROR( "Cannot initialize the agpgart module.\n" );
+ DRM(sysctl_cleanup)( dev );
+ destroy_dev(dev->devnode);
+ DRM(takedown)( dev );
+ DRM_OS_RETURN(ENOMEM);
+ }
+#endif
+#if __REALLY_HAVE_MTRR
+ if (dev->agp)
+ dev->agp->agp_mtrr = mtrr_add( dev->agp->agp_info.aper_base,
+ dev->agp->agp_info.aper_size*1024*1024,
+ MTRR_TYPE_WRCOMB,
+ 1 );
+#endif
+#endif
+
+#if __HAVE_CTX_BITMAP
+ retcode = DRM(ctxbitmap_init)( dev );
+ if( retcode ) {
+ DRM_ERROR( "Cannot allocate memory for context bitmap.\n" );
+ DRM(sysctl_cleanup)( dev );
+ destroy_dev(dev->devnode);
+ DRM(takedown)( dev );
+ return retcode;
+ }
+#endif
+ DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d\n",
+ DRIVER_NAME,
+ DRIVER_MAJOR,
+ DRIVER_MINOR,
+ DRIVER_PATCHLEVEL,
+ DRIVER_DATE,
+ DRM(minor)[i] );
+ }
+
+ DRIVER_POSTINIT();
+
+ return 0;
+}
+
+/* DRM(cleanup) is called from DRM(detach) when the driver is detached,
+ * e.g. at module unload time.
+ */
+static void DRM(cleanup)(device_t nbdev)
+{
+ drm_device_t *dev;
+ int i;
+
+ DRM_DEBUG( "\n" );
+
+ for (i = DRM(numdevs) - 1; i >= 0; i--) {
+ /* FIXME??? - multihead */
+ dev = device_get_softc(nbdev);
+ DRM(sysctl_cleanup)( dev );
+ destroy_dev(dev->devnode);
+#if __HAVE_CTX_BITMAP
+ DRM(ctxbitmap_cleanup)( dev );
+#endif
+
+#if __REALLY_HAVE_AGP && __REALLY_HAVE_MTRR
+ if ( dev->agp && dev->agp->agp_mtrr >= 0) {
+ int retval;
+ retval = mtrr_del( dev->agp->agp_mtrr,
+ dev->agp->agp_info.aper_base,
+ dev->agp->agp_info.aper_size*1024*1024 );
+ DRM_DEBUG( "mtrr_del=%d\n", retval );
+ }
+#endif
+
+ DRM(takedown)( dev );
+
+#if __REALLY_HAVE_AGP
+ if ( dev->agp ) {
+ DRM(agp_uninit)();
+ DRM(free)( dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS );
+ dev->agp = NULL;
+ }
+#endif
+ }
+ DRIVER_POSTCLEANUP();
+ DRM_OS_FREE(DRM(minor));
+ DRM_OS_FREE(DRM(device));
+ DRM(numdevs) = 0;
+}
+
+
+int DRM(version)( DRM_OS_IOCTL )
+{
+ drm_version_t version;
+ int len;
+
+ DRM_OS_KRNFROMUSR( version, (drm_version_t *)data, sizeof(version) );
+
+#define DRM_COPY( name, value ) \
+ len = strlen( value ); \
+ if ( len > name##_len ) len = name##_len; \
+ name##_len = strlen( value ); \
+ if ( len && name ) { \
+ if ( DRM_OS_COPYTOUSR( name, value, len ) ) \
+ DRM_OS_RETURN(EFAULT); \
+ }
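+/* DRM_COPY copies at most name##_len bytes of the kernel string into the
+ * caller's buffer but always reports the full string length back, so a
+ * caller (e.g. drmGetVersion in libdrm) can pass zero-sized buffers first
+ * to learn the lengths and then repeat the ioctl with buffers that fit.
+ */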
+
+ version.version_major = DRIVER_MAJOR;
+ version.version_minor = DRIVER_MINOR;
+ version.version_patchlevel = DRIVER_PATCHLEVEL;
+
+ DRM_COPY( version.name, DRIVER_NAME );
+ DRM_COPY( version.date, DRIVER_DATE );
+ DRM_COPY( version.desc, DRIVER_DESC );
+
+ DRM_OS_KRNTOUSR( (drm_version_t *)data, version, sizeof(version) );
+
+ return 0;
+}
+
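+/* Per-unit open: the very first open runs DRM(setup)() to build the device
+ * state, and the matching last close in DRM(close) tears it down again with
+ * DRM(takedown)().  open_count is guarded by the count_lock spinlock so the
+ * setup/teardown decision is made atomically.
+ */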
+int DRM( open)(dev_t kdev, int flags, int fmt, DRM_OS_STRUCTPROC *p)
+{
+ drm_device_t *dev = NULL;
+ int retcode = 0;
+ int i;
+
+ for (i = 0; i < DRM(numdevs); i++) {
+ /* FIXME ??? - multihead */
+ dev = DRIVER_SOFTC(minor(kdev));
+ }
+ if (!dev) {
+ DRM_OS_RETURN(ENODEV);
+ }
+
+ DRM_DEBUG( "open_count = %d\n", dev->open_count );
+
+ device_busy(dev->device);
+ retcode = DRM(open_helper)(kdev, flags, fmt, p, dev);
+
+ if ( !retcode ) {
+ atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
+ DRM_OS_SPINLOCK( &dev->count_lock );
+ if ( !dev->open_count++ ) {
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+ return DRM(setup)( dev );
+ }
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+ }
+ device_unbusy(dev->device);
+
+ return retcode;
+}
+
+int DRM( close)(dev_t kdev, int flags, int fmt, DRM_OS_STRUCTPROC *p)
+{
+ drm_file_t *priv;
+ drm_device_t *dev = kdev->si_drv1;
+ int retcode = 0;
+
+ DRM_DEBUG( "open_count = %d\n", dev->open_count );
+ priv = DRM(find_file_by_proc)(dev, p);
+ if (!priv) {
+ DRM_DEBUG("can't find authenticator\n");
+ return EINVAL;
+ }
+
+ DRIVER_PRERELEASE();
+
+ /* ========================================================
+ * Begin inline drm_release
+ */
+
+ DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n",
+ DRM_OS_CURRENTPID, (long)dev->device, dev->open_count );
+
+ if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
+ && dev->lock.pid == DRM_OS_CURRENTPID) {
+ DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
+ DRM_OS_CURRENTPID,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+#if HAVE_DRIVER_RELEASE
+ DRIVER_RELEASE();
+#endif
+ DRM(lock_free)(dev,
+ &dev->lock.hw_lock->lock,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+
+ /* FIXME: may require heavy-handed reset of
+ hardware at this point, possibly
+ processed via a callback to the X
+ server. */
+ }
+#if __HAVE_RELEASE
+ else if ( dev->lock.hw_lock ) {
+ /* The lock is required to reclaim buffers */
+ for (;;) {
+ if ( !dev->lock.hw_lock ) {
+ /* Device has been unregistered */
+ retcode = EINTR;
+ break;
+ }
+ if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT ) ) {
+ dev->lock.pid = p->p_pid;
+ dev->lock.lock_time = jiffies;
+ atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
+ break; /* Got lock */
+ }
+ /* Contention */
+#if 0
+ atomic_inc( &dev->total_sleeps );
+#endif
+ retcode = tsleep(&dev->lock.lock_queue,
+ PZERO|PCATCH,
+ "drmlk2",
+ 0);
+ if (retcode)
+ break;
+ }
+ if( !retcode ) {
+ DRIVER_RELEASE();
+ DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT );
+ }
+ }
+#elif __HAVE_DMA
+ DRM(reclaim_buffers)( dev, priv->pid );
+#endif
+
+ funsetown(dev->buf_sigio);
+
+ DRM_OS_LOCK;
+ priv = DRM(find_file_by_proc)(dev, p);
+ if (priv) {
+ priv->refs--;
+ if (!priv->refs) {
+ TAILQ_REMOVE(&dev->files, priv, link);
+ }
+ }
+ DRM_OS_UNLOCK;
+
+ DRM(free)( priv, sizeof(*priv), DRM_MEM_FILES );
+
+ /* ========================================================
+ * End inline drm_release
+ */
+
+ atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
+ DRM_OS_SPINLOCK( &dev->count_lock );
+ if ( !--dev->open_count ) {
+ if ( atomic_read( &dev->ioctl_count ) || dev->blocked ) {
+ DRM_ERROR( "Device busy: %ld %d\n",
+ (unsigned long)atomic_read( &dev->ioctl_count ),
+ dev->blocked );
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+ DRM_OS_RETURN(EBUSY);
+ }
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+ device_unbusy(dev->device);
+ return DRM(takedown)( dev );
+ }
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+
+
+ DRM_OS_RETURN(retcode);
+}
+
+/* DRM(ioctl) is called whenever a process performs an ioctl on /dev/drm.
+ */
+int DRM(ioctl)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ int retcode = 0;
+ drm_ioctl_desc_t *ioctl;
+ d_ioctl_t *func;
+ int nr = DRM_IOCTL_NR(cmd);
+ DRM_OS_PRIV;
+
+ atomic_inc( &dev->ioctl_count );
+ atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] );
+ ++priv->ioctl_count;
+
+ DRM_DEBUG( "pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
+ DRM_OS_CURRENTPID, cmd, nr, (long)dev->device, priv->authenticated );
+
+ switch (cmd) {
+ case FIONBIO:
+ atomic_dec(&dev->ioctl_count);
+ return 0;
+
+ case FIOASYNC:
+ atomic_dec(&dev->ioctl_count);
+ dev->flags |= FASYNC;
+ return 0;
+
+ case FIOSETOWN:
+ atomic_dec(&dev->ioctl_count);
+ return fsetown(*(int *)data, &dev->buf_sigio);
+
+ case FIOGETOWN:
+ atomic_dec(&dev->ioctl_count);
+ *(int *) data = fgetown(dev->buf_sigio);
+ return 0;
+ }
+
+ if ( nr >= DRIVER_IOCTL_COUNT ) {
+ retcode = EINVAL;
+ } else {
+ ioctl = &DRM(ioctls)[nr];
+ func = ioctl->func;
+
+ if ( !func ) {
+ DRM_DEBUG( "no function\n" );
+ retcode = EINVAL;
+ } else if ( ( ioctl->root_only && DRM_OS_CHECKSUSER )
+ || ( ioctl->auth_needed && !priv->authenticated ) ) {
+ retcode = EACCES;
+ } else {
+ retcode = func( kdev, cmd, data, flags, p );
+ }
+ }
+
+ atomic_dec( &dev->ioctl_count );
+ DRM_OS_RETURN(retcode);
+}
+
+int DRM(lock)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_lock_t lock;
+ int ret = 0;
+#if __HAVE_MULTIPLE_DMA_QUEUES
+ drm_queue_t *q;
+#endif
+#if __HAVE_DMA_HISTOGRAM
+ cycles_t start;
+
+ dev->lck_start = start = get_cycles();
+#endif
+
+ DRM_OS_KRNFROMUSR( lock, (drm_lock_t *)data, sizeof(lock) );
+
+ if ( lock.context == DRM_KERNEL_CONTEXT ) {
+ DRM_ERROR( "Process %d using kernel context %d\n",
+ DRM_OS_CURRENTPID, lock.context );
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ DRM_DEBUG( "%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
+ lock.context, DRM_OS_CURRENTPID,
+ dev->lock.hw_lock->lock, lock.flags );
+
+#if __HAVE_DMA_QUEUE
+ if ( lock.context < 0 )
+ DRM_OS_RETURN(EINVAL);
+#elif __HAVE_MULTIPLE_DMA_QUEUES
+ if ( lock.context < 0 || lock.context >= dev->queue_count )
+ DRM_OS_RETURN(EINVAL);
+ q = dev->queuelist[lock.context];
+#endif
+
+#if __HAVE_DMA_FLUSH
+ ret = DRM(flush_block_and_flush)( dev, lock.context, lock.flags );
+#endif
+ if ( !ret ) {
+ for (;;) {
+ if ( !dev->lock.hw_lock ) {
+ /* Device has been unregistered */
+ ret = EINTR;
+ break;
+ }
+ if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
+ lock.context ) ) {
+ dev->lock.pid = DRM_OS_CURRENTPID;
+ dev->lock.lock_time = jiffies;
+ atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
+ break; /* Got lock */
+ }
+
+ /* Contention */
+ ret = tsleep(&dev->lock.lock_queue,
+ PZERO|PCATCH,
+ "drmlk2",
+ 0);
+ if (ret)
+ break;
+ }
+ }
+
+#if __HAVE_DMA_FLUSH
+ DRM(flush_unblock)( dev, lock.context, lock.flags ); /* cleanup phase */
+#endif
+
+ if ( !ret ) {
+
+#if __HAVE_DMA_READY
+ if ( lock.flags & _DRM_LOCK_READY ) {
+ DRIVER_DMA_READY();
+ }
+#endif
+#if __HAVE_DMA_QUIESCENT
+ if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
+ DRIVER_DMA_QUIESCENT();
+ }
+#endif
+#if __HAVE_KERNEL_CTX_SWITCH
+ if ( dev->last_context != lock.context ) {
+ DRM(context_switch)(dev, dev->last_context,
+ lock.context);
+ }
+#endif
+ }
+
+ DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
+
+#if __HAVE_DMA_HISTOGRAM
+ atomic_inc(&dev->histo.lacq[DRM(histogram_slot)(get_cycles()-start)]);
+#endif
+
+ DRM_OS_RETURN(ret);
+}
+
+
+int DRM(unlock)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_lock_t lock;
+
+ DRM_OS_KRNFROMUSR( lock, (drm_lock_t *)data, sizeof(lock) ) ;
+
+ if ( lock.context == DRM_KERNEL_CONTEXT ) {
+ DRM_ERROR( "Process %d using kernel context %d\n",
+ DRM_OS_CURRENTPID, lock.context );
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] );
+
+#if __HAVE_KERNEL_CTX_SWITCH
+ /* We no longer really hold it, but if we are the next
+ * agent to request it then we should just be able to
+ * take it immediately and not eat the ioctl.
+ */
+ dev->lock.pid = 0;
+ {
+ __volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
+ unsigned int old, new, prev, ctx;
+
+ ctx = lock.context;
+ do {
+ old = *plock;
+ new = ctx;
+ prev = cmpxchg(plock, old, new);
+ } while (prev != old);
+ }
+ wake_up_interruptible(&dev->lock.lock_queue);
+#else
+ DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT );
+#if __HAVE_DMA_SCHEDULE
+ DRM(dma_schedule)( dev, 1 );
+#endif
+
+ /* FIXME: Do we ever really need to check this???
+ */
+ if ( 1 /* !dev->context_flag */ ) {
+ if ( DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT ) ) {
+ DRM_ERROR( "\n" );
+ }
+ }
+#endif /* !__HAVE_KERNEL_CTX_SWITCH */
+
+ return 0;
+}
+
+#if DRM_LINUX
+static linux_ioctl_function_t DRM( linux_ioctl);
+static struct linux_ioctl_handler DRM( handler) = {DRM( linux_ioctl), LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};
+SYSINIT (DRM( register), SI_SUB_KLD, SI_ORDER_MIDDLE, linux_ioctl_register_handler, &DRM( handler));
+SYSUNINIT(DRM( unregister), SI_SUB_KLD, SI_ORDER_MIDDLE, linux_ioctl_unregister_handler, &DRM( handler));
+
+/*
+ * Linux emulation IOCTL
+ */
+static int
+DRM(linux_ioctl)(DRM_OS_STRUCTPROC *p, struct linux_ioctl_args* args)
+{
+#if (__FreeBSD_version >= 500000)
+ struct file *fp = p->td_proc->p_fd->fd_ofiles[args->fd];
+#else
+ struct file *fp = p->p_fd->fd_ofiles[args->fd];
+#endif
+ u_long cmd = args->cmd;
+ caddr_t data = (caddr_t) args->arg;
+ /*
+ * Pass the ioctl off to our standard handler.
+ */
+ return(fo_ioctl(fp, cmd, data, p));
+}
+#endif /* DRM_LINUX */
diff --git a/bsd-core/drm_fops.c b/bsd-core/drm_fops.c
new file mode 100644
index 00000000..53af39cd
--- /dev/null
+++ b/bsd-core/drm_fops.c
@@ -0,0 +1,223 @@
+/* drm_fops.h -- File operations for DRM -*- linux-c -*-
+ * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Daryll Strauss <daryll@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+
+#include <sys/signalvar.h>
+#include <sys/poll.h>
+
+drm_file_t *DRM(find_file_by_proc)(drm_device_t *dev, DRM_OS_STRUCTPROC *p)
+{
+#if __FreeBSD_version >= 500021
+ uid_t uid = p->td_proc->p_ucred->cr_svuid;
+ pid_t pid = p->td_proc->p_pid;
+#else
+ uid_t uid = p->p_cred->p_svuid;
+ pid_t pid = p->p_pid;
+#endif
+ drm_file_t *priv;
+
+ TAILQ_FOREACH(priv, &dev->files, link)
+ if (priv->pid == pid && priv->uid == uid)
+ return priv;
+ return NULL;
+}
+
+/* DRM(open_helper) is called from DRM(open) whenever a process opens a
+   DRM device node; it creates or reuses the per-process drm_file_t. */
+
+int DRM(open_helper)(dev_t kdev, int flags, int fmt, DRM_OS_STRUCTPROC *p,
+ drm_device_t *dev)
+{
+ int m = minor(kdev);
+ drm_file_t *priv;
+
+ if (flags & O_EXCL)
+ return EBUSY; /* No exclusive opens */
+ dev->flags = flags;
+ if (!DRM(cpu_valid)())
+ DRM_OS_RETURN(EINVAL);
+
+ DRM_DEBUG("pid = %d, minor = %d\n", DRM_OS_CURRENTPID, m);
+
+ /* FIXME: linux mallocs and bzeros here */
+ priv = (drm_file_t *) DRM(find_file_by_proc)(dev, p);
+ if (priv) {
+ priv->refs++;
+ } else {
+ priv = (drm_file_t *) DRM(alloc)(sizeof(*priv), DRM_MEM_FILES);
+ bzero(priv, sizeof(*priv));
+#if __FreeBSD_version >= 500000
+ priv->uid = p->td_proc->p_ucred->cr_svuid;
+ priv->pid = p->td_proc->p_pid;
+#else
+ priv->uid = p->p_cred->p_svuid;
+ priv->pid = p->p_pid;
+#endif
+
+ priv->refs = 1;
+ priv->minor = m;
+ priv->devXX = dev;
+ priv->ioctl_count = 0;
+ priv->authenticated = !DRM_OS_CHECKSUSER;
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, p);
+ TAILQ_INSERT_TAIL(&dev->files, priv, link);
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, p);
+ }
+
+ kdev->si_drv1 = dev;
+
+
+ return 0;
+}
+
+
+/* The drm_read and drm_write_string code (especially the part that manages
+   the circular buffer) is based on Alessandro Rubini's LINUX DEVICE
+   DRIVERS (Cambridge: O'Reilly, 1998), pages 111-113. */
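+/* The buffer is a simple ring over dev->buf[0..DRM_BSZ): buf_rp == buf_wp
+   means empty, readers sleep on buf_rp until a writer wakes them, and
+   DRM(write_string) leaves a one-byte hole rather than letting buf_wp catch
+   up with buf_rp, so a full ring is never mistaken for an empty one. */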
+
+ssize_t DRM(read)(dev_t kdev, struct uio *uio, int ioflag)
+{
+ DRM_OS_DEVICE;
+ int left;
+ int avail;
+ int send;
+ int cur;
+ int error = 0;
+
+ DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp);
+
+ while (dev->buf_rp == dev->buf_wp) {
+ DRM_DEBUG(" sleeping\n");
+ if (dev->flags & FASYNC)
+ return EWOULDBLOCK;
+ error = tsleep(&dev->buf_rp, PZERO|PCATCH, "drmrd", 0);
+ if (error) {
+ DRM_DEBUG(" interrupted\n");
+ return error;
+ }
+ DRM_DEBUG(" awake\n");
+ }
+
+ left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
+ avail = DRM_BSZ - left;
+ send = DRM_MIN(avail, uio->uio_resid);
+
+ while (send) {
+ if (dev->buf_wp > dev->buf_rp) {
+ cur = DRM_MIN(send, dev->buf_wp - dev->buf_rp);
+ } else {
+ cur = DRM_MIN(send, dev->buf_end - dev->buf_rp);
+ }
+ error = uiomove(dev->buf_rp, cur, uio);
+ if (error)
+ break;
+ dev->buf_rp += cur;
+ if (dev->buf_rp == dev->buf_end) dev->buf_rp = dev->buf;
+ send -= cur;
+ }
+
+ wakeup(&dev->buf_wp);
+ return error;
+}
+
+int DRM(write_string)(drm_device_t *dev, const char *s)
+{
+ int left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
+ int send = strlen(s);
+ int count;
+
+ DRM_DEBUG("%d left, %d to send (%p, %p)\n",
+ left, send, dev->buf_rp, dev->buf_wp);
+
+ if (left == 1 || dev->buf_wp != dev->buf_rp) {
+ DRM_ERROR("Buffer not empty (%d left, wp = %p, rp = %p)\n",
+ left,
+ dev->buf_wp,
+ dev->buf_rp);
+ }
+
+ while (send) {
+ if (dev->buf_wp >= dev->buf_rp) {
+ count = DRM_MIN(send, dev->buf_end - dev->buf_wp);
+ if (count == left) --count; /* Leave a hole */
+ } else {
+ count = DRM_MIN(send, dev->buf_rp - dev->buf_wp - 1);
+ }
+ strncpy(dev->buf_wp, s, count);
+ dev->buf_wp += count;
+ if (dev->buf_wp == dev->buf_end) dev->buf_wp = dev->buf;
+ send -= count;
+ }
+
+ if (dev->buf_selecting) {
+ dev->buf_selecting = 0;
+ selwakeup(&dev->buf_sel);
+ }
+
+ DRM_DEBUG("dev->buf_sigio=%p\n", dev->buf_sigio);
+ if (dev->buf_sigio) {
+ DRM_DEBUG("dev->buf_sigio->sio_pgid=%d\n", dev->buf_sigio->sio_pgid);
+ pgsigio(dev->buf_sigio, SIGIO, 0);
+ }
+ DRM_DEBUG("waking\n");
+ wakeup(&dev->buf_rp);
+
+ return 0;
+}
+
+int DRM(poll)(dev_t kdev, int events, DRM_OS_STRUCTPROC *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ int s;
+ int revents = 0;
+
+ s = spldrm();
+ if (events & (POLLIN | POLLRDNORM)) {
+ int left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
+ if (left > 0)
+ revents |= events & (POLLIN | POLLRDNORM);
+ else
+ selrecord(p, &dev->buf_sel);
+ }
+ splx(s);
+
+ return revents;
+}
+
+int DRM(write)(dev_t kdev, struct uio *uio, int ioflag)
+{
+ DRM_DEBUG("pid = %d, device = %p, open_count = %d\n",
+ curproc->p_pid, ((drm_device_t *)kdev->si_drv1)->device, ((drm_device_t *)kdev->si_drv1)->open_count);
+ return 0;
+}
diff --git a/bsd-core/drm_ioctl.c b/bsd-core/drm_ioctl.c
new file mode 100644
index 00000000..1e8281e6
--- /dev/null
+++ b/bsd-core/drm_ioctl.c
@@ -0,0 +1,237 @@
+/* drm_ioctl.h -- IOCTL processing for DRM -*- linux-c -*-
+ * Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include <sys/bus.h>
+#include <pci/pcivar.h>
+
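+/* DRM_IOCTL_IRQ_BUSID: given a PCI bus/slot/function triple from user space,
+ * walk the children of the matching "pci" bus device and report the IRQ the
+ * kernel assigned to that function (0 if no such device is found).
+ */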
+int DRM(irq_busid)( DRM_OS_IOCTL )
+{
+ drm_irq_busid_t id;
+ devclass_t pci;
+ device_t bus, dev;
+ device_t *kids;
+ int error, i, num_kids;
+
+ DRM_OS_KRNFROMUSR( id, (drm_irq_busid_t *)data, sizeof(id) );
+
+ pci = devclass_find("pci");
+ if (!pci)
+ return ENOENT;
+ bus = devclass_get_device(pci, id.busnum);
+ if (!bus)
+ return ENOENT;
+ error = device_get_children(bus, &kids, &num_kids);
+ if (error)
+ return error;
+
+ dev = 0;
+ for (i = 0; i < num_kids; i++) {
+ dev = kids[i];
+ if (pci_get_slot(dev) == id.devnum
+ && pci_get_function(dev) == id.funcnum)
+ break;
+ }
+
+ free(kids, M_TEMP);
+
+ if (i != num_kids)
+ id.irq = pci_get_irq(dev);
+ else
+ id.irq = 0;
+ DRM_DEBUG("%d:%d:%d => IRQ %d\n",
+ id.busnum, id.devnum, id.funcnum, id.irq);
+
+ DRM_OS_KRNTOUSR( (drm_irq_busid_t *)data, id, sizeof(id) );
+
+ return 0;
+}
+
+int DRM(getunique)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_unique_t u;
+
+ DRM_OS_KRNFROMUSR( u, (drm_unique_t *)data, sizeof(u) );
+
+ if (u.unique_len >= dev->unique_len) {
+ if (DRM_OS_COPYTOUSR(u.unique, dev->unique, dev->unique_len))
+ DRM_OS_RETURN(EFAULT);
+ }
+ u.unique_len = dev->unique_len;
+
+ DRM_OS_KRNTOUSR( (drm_unique_t *)data, u, sizeof(u) );
+
+ return 0;
+}
+
+int DRM(setunique)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_unique_t u;
+
+ if (dev->unique_len || dev->unique)
+ DRM_OS_RETURN(EBUSY);
+
+ DRM_OS_KRNFROMUSR( u, (drm_unique_t *)data, sizeof(u) );
+
+ if (!u.unique_len || u.unique_len > 1024)
+ DRM_OS_RETURN(EINVAL);
+
+ dev->unique_len = u.unique_len;
+ dev->unique = DRM(alloc)(u.unique_len + 1, DRM_MEM_DRIVER);
+
+ if(!dev->unique) DRM_OS_RETURN(ENOMEM);
+
+ if (DRM_OS_COPYFROMUSR(dev->unique, u.unique, dev->unique_len))
+ DRM_OS_RETURN(EFAULT);
+
+ dev->unique[dev->unique_len] = '\0';
+
+ dev->devname = DRM(alloc)(strlen(dev->name) + strlen(dev->unique) + 2,
+ DRM_MEM_DRIVER);
+ if(!dev->devname) {
+ DRM(free)(dev->devname, sizeof(*dev->devname), DRM_MEM_DRIVER);
+ DRM_OS_RETURN(ENOMEM);
+ }
+ sprintf(dev->devname, "%s@%s", dev->name, dev->unique);
+
+
+ return 0;
+}
+
+
+int DRM(getmap)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_map_t map;
+ drm_map_t *mapinlist;
+ drm_map_list_entry_t *list;
+ int idx;
+ int i = 0;
+
+ DRM_OS_KRNFROMUSR( map, (drm_map_t *)data, sizeof(map) );
+
+ idx = map.offset;
+
+ DRM_OS_LOCK;
+ if (idx < 0 || idx >= dev->map_count) {
+ DRM_OS_UNLOCK;
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ TAILQ_FOREACH(list, dev->maplist, link) {
+ mapinlist = list->map;
+ if (i==idx) {
+ map.offset = mapinlist->offset;
+ map.size = mapinlist->size;
+ map.type = mapinlist->type;
+ map.flags = mapinlist->flags;
+ map.handle = mapinlist->handle;
+ map.mtrr = mapinlist->mtrr;
+ break;
+ }
+ i++;
+ }
+
+ DRM_OS_UNLOCK;
+
+ if (!list)
+ return EINVAL;
+
+ DRM_OS_KRNTOUSR( (drm_map_t *)data, map, sizeof(map) );
+
+ return 0;
+}
+
+int DRM(getclient)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_client_t client;
+ drm_file_t *pt;
+ int idx;
+ int i = 0;
+
+ DRM_OS_KRNFROMUSR( client, (drm_client_t *)data, sizeof(client) );
+
+ idx = client.idx;
+ DRM_OS_LOCK;
+ TAILQ_FOREACH(pt, &dev->files, link) {
+ if (i==idx)
+ {
+ client.auth = pt->authenticated;
+ client.pid = pt->pid;
+ client.uid = pt->uid;
+ client.magic = pt->magic;
+ client.iocs = pt->ioctl_count;
+ DRM_OS_UNLOCK;
+
+ *(drm_client_t *)data = client;
+ return 0;
+ }
+ i++;
+ }
+ DRM_OS_UNLOCK;
+
+ DRM_OS_KRNTOUSR( (drm_client_t *)data, client, sizeof(client) );
+
+ return 0;
+}
+
+int DRM(getstats)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_stats_t stats;
+ int i;
+
+ memset(&stats, 0, sizeof(stats));
+
+ DRM_OS_LOCK;
+
+ for (i = 0; i < dev->counters; i++) {
+ if (dev->types[i] == _DRM_STAT_LOCK)
+ stats.data[i].value
+ = (dev->lock.hw_lock
+ ? dev->lock.hw_lock->lock : 0);
+ else
+ stats.data[i].value = atomic_read(&dev->counts[i]);
+ stats.data[i].type = dev->types[i];
+ }
+
+ stats.count = dev->counters;
+
+ DRM_OS_UNLOCK;
+
+ DRM_OS_KRNTOUSR( (drm_stats_t *)data, stats, sizeof(stats) );
+
+ return 0;
+}
diff --git a/bsd-core/drm_lock.c b/bsd-core/drm_lock.c
new file mode 100644
index 00000000..863a228c
--- /dev/null
+++ b/bsd-core/drm_lock.c
@@ -0,0 +1,244 @@
+/* lock.c -- IOCTLs for locking -*- linux-c -*-
+ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+int DRM(block)( DRM_OS_IOCTL )
+{
+ DRM_DEBUG("\n");
+ return 0;
+}
+
+int DRM(unblock)( DRM_OS_IOCTL )
+{
+ DRM_DEBUG("\n");
+ return 0;
+}
+
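+/* The hardware lock is a single 32-bit word shared with user space: the low
+   bits name the context that owns it and _DRM_LOCK_HELD / _DRM_LOCK_CONT are
+   flag bits, so take, transfer and free are all small _DRM_CAS
+   (compare-and-swap) retry loops with no extra kernel locking. */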
+int DRM(lock_take)(__volatile__ unsigned int *lock, unsigned int context)
+{
+ unsigned int old, new;
+
+ char failed;
+
+ do {
+ old = *lock;
+ if (old & _DRM_LOCK_HELD) new = old | _DRM_LOCK_CONT;
+ else new = context | _DRM_LOCK_HELD;
+ _DRM_CAS(lock, old, new, failed);
+ } while (failed);
+ if (_DRM_LOCKING_CONTEXT(old) == context) {
+ if (old & _DRM_LOCK_HELD) {
+ if (context != DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("%d holds heavyweight lock\n",
+ context);
+ }
+ return 0;
+ }
+ }
+ if (new == (context | _DRM_LOCK_HELD)) {
+ /* Have lock */
+ return 1;
+ }
+ return 0;
+}
+
+/* This takes a lock forcibly and hands it to context. Should ONLY be used
+ inside *_unlock to give lock to kernel before calling *_dma_schedule. */
+int DRM(lock_transfer)(drm_device_t *dev,
+ __volatile__ unsigned int *lock, unsigned int context)
+{
+ unsigned int old, new;
+ char failed;
+
+ dev->lock.pid = 0;
+ do {
+ old = *lock;
+ new = context | _DRM_LOCK_HELD;
+ _DRM_CAS(lock, old, new, failed);
+ } while (failed);
+ return 1;
+}
+
+int DRM(lock_free)(drm_device_t *dev,
+ __volatile__ unsigned int *lock, unsigned int context)
+{
+ unsigned int old, new;
+ pid_t pid = dev->lock.pid;
+ char failed;
+
+ dev->lock.pid = 0;
+ do {
+ old = *lock;
+ new = 0;
+ _DRM_CAS(lock, old, new, failed);
+ } while (failed);
+ if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
+ DRM_ERROR("%d freed heavyweight lock held by %d (pid %d)\n",
+ context,
+ _DRM_LOCKING_CONTEXT(old),
+ pid);
+ return 1;
+ }
+ DRM_OS_WAKEUP_INT(&dev->lock.lock_queue);
+ return 0;
+}
+
+static int DRM(flush_queue)(drm_device_t *dev, int context)
+{
+ int error;
+ int ret = 0;
+ drm_queue_t *q = dev->queuelist[context];
+
+ DRM_DEBUG("\n");
+
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->use_count) > 1) {
+ atomic_inc(&q->block_write);
+ atomic_inc(&q->block_count);
+ error = tsleep(&q->flush_queue, PZERO|PCATCH, "drmfq", 0);
+ if (error)
+ return error;
+ atomic_dec(&q->block_count);
+ }
+ atomic_dec(&q->use_count);
+
+	/* NOTE: block_write is still incremented!
+	   Use DRM(flush_unblock_queue) to decrement. */
+ return ret;
+}
+
+static int DRM(flush_unblock_queue)(drm_device_t *dev, int context)
+{
+ drm_queue_t *q = dev->queuelist[context];
+
+ DRM_DEBUG("\n");
+
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->use_count) > 1) {
+ if (atomic_read(&q->block_write)) {
+ atomic_dec(&q->block_write);
+ DRM_OS_WAKEUP_INT(&q->write_queue);
+ }
+ }
+ atomic_dec(&q->use_count);
+ return 0;
+}
+
+int DRM(flush_block_and_flush)(drm_device_t *dev, int context,
+ drm_lock_flags_t flags)
+{
+ int ret = 0;
+ int i;
+
+ DRM_DEBUG("\n");
+
+ if (flags & _DRM_LOCK_FLUSH) {
+ ret = DRM(flush_queue)(dev, DRM_KERNEL_CONTEXT);
+ if (!ret) ret = DRM(flush_queue)(dev, context);
+ }
+ if (flags & _DRM_LOCK_FLUSH_ALL) {
+ for (i = 0; !ret && i < dev->queue_count; i++) {
+ ret = DRM(flush_queue)(dev, i);
+ }
+ }
+ return ret;
+}
+
+int DRM(flush_unblock)(drm_device_t *dev, int context, drm_lock_flags_t flags)
+{
+ int ret = 0;
+ int i;
+
+ DRM_DEBUG("\n");
+
+ if (flags & _DRM_LOCK_FLUSH) {
+ ret = DRM(flush_unblock_queue)(dev, DRM_KERNEL_CONTEXT);
+ if (!ret) ret = DRM(flush_unblock_queue)(dev, context);
+ }
+ if (flags & _DRM_LOCK_FLUSH_ALL) {
+ for (i = 0; !ret && i < dev->queue_count; i++) {
+ ret = DRM(flush_unblock_queue)(dev, i);
+ }
+ }
+
+ return ret;
+}
+
+int DRM(finish)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ int ret = 0;
+ drm_lock_t lock;
+
+ DRM_DEBUG("\n");
+
+ DRM_OS_KRNFROMUSR( lock, (drm_lock_t *)data, sizeof(lock) );
+
+ ret = DRM(flush_block_and_flush)(dev, lock.context, lock.flags);
+ DRM(flush_unblock)(dev, lock.context, lock.flags);
+ return ret;
+}
+
+/* If we get here, it means that the process has called DRM_IOCTL_LOCK
+ without calling DRM_IOCTL_UNLOCK.
+
+ If the lock is not held, then let the signal proceed as usual.
+
+ If the lock is held, then set the contended flag and keep the signal
+ blocked.
+
+
+ Return 1 if the signal should be delivered normally.
+ Return 0 if the signal should be blocked. */
+
+int DRM(notifier)(void *priv)
+{
+ drm_sigdata_t *s = (drm_sigdata_t *)priv;
+ unsigned int old, new;
+ char failed;
+
+
+ /* Allow signal delivery if lock isn't held */
+ if (!_DRM_LOCK_IS_HELD(s->lock->lock)
+ || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context) return 1;
+
+ /* Otherwise, set flag to force call to
+ drmUnlock */
+ do {
+ old = s->lock->lock;
+ new = old | _DRM_LOCK_CONT;
+ _DRM_CAS(&s->lock->lock, old, new, failed);
+ } while (failed);
+ return 0;
+}
+
diff --git a/bsd-core/drm_memory.c b/bsd-core/drm_memory.c
new file mode 100644
index 00000000..605b42ae
--- /dev/null
+++ b/bsd-core/drm_memory.c
@@ -0,0 +1,433 @@
+/* drm_memory.h -- Memory management wrappers for DRM -*- linux-c -*-
+ * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#if __REALLY_HAVE_AGP
+#include <sys/agpio.h>
+#endif
+
+#define malloctype DRM(M_DRM)
+/* The macros conflicted when used directly in MALLOC_DEFINE, hence this
+   temporary alias. */
+
+MALLOC_DEFINE(malloctype, "drm", "DRM Data Structures");
+
+#undef malloctype
+
+typedef struct drm_mem_stats {
+ const char *name;
+ int succeed_count;
+ int free_count;
+ int fail_count;
+ unsigned long bytes_allocated;
+ unsigned long bytes_freed;
+} drm_mem_stats_t;
+
+static DRM_OS_SPINTYPE DRM(mem_lock);
+static unsigned long DRM(ram_available) = 0; /* In pages */
+static unsigned long DRM(ram_used) = 0;
+static drm_mem_stats_t DRM(mem_stats)[] = {
+ [DRM_MEM_DMA] = { "dmabufs" },
+ [DRM_MEM_SAREA] = { "sareas" },
+ [DRM_MEM_DRIVER] = { "driver" },
+ [DRM_MEM_MAGIC] = { "magic" },
+ [DRM_MEM_IOCTLS] = { "ioctltab" },
+ [DRM_MEM_MAPS] = { "maplist" },
+ [DRM_MEM_VMAS] = { "vmalist" },
+ [DRM_MEM_BUFS] = { "buflist" },
+ [DRM_MEM_SEGS] = { "seglist" },
+ [DRM_MEM_PAGES] = { "pagelist" },
+ [DRM_MEM_FILES] = { "files" },
+ [DRM_MEM_QUEUES] = { "queues" },
+ [DRM_MEM_CMDS] = { "commands" },
+ [DRM_MEM_MAPPINGS] = { "mappings" },
+ [DRM_MEM_BUFLISTS] = { "buflists" },
+ [DRM_MEM_AGPLISTS] = { "agplist" },
+ [DRM_MEM_SGLISTS] = { "sglist" },
+ [DRM_MEM_TOTALAGP] = { "totalagp" },
+ [DRM_MEM_BOUNDAGP] = { "boundagp" },
+ [DRM_MEM_CTXBITMAP] = { "ctxbitmap"},
+ [DRM_MEM_STUB] = { "stub" },
+ { NULL, 0, } /* Last entry must be null */
+};
+
+void DRM(mem_init)(void)
+{
+ drm_mem_stats_t *mem;
+
+ DRM_OS_SPININIT(DRM(mem_lock), "drm memory");
+
+ for (mem = DRM(mem_stats); mem->name; ++mem) {
+ mem->succeed_count = 0;
+ mem->free_count = 0;
+ mem->fail_count = 0;
+ mem->bytes_allocated = 0;
+ mem->bytes_freed = 0;
+ }
+
+ DRM(ram_available) = 0; /* si.totalram */
+ DRM(ram_used) = 0;
+}
+
+/* DRM(mem_info) is the sysctl handler that reports the memory statistics
+   gathered above. */
+
+static int DRM(_mem_info) DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_mem_stats_t *pt;
+ char buf[128];
+ int error;
+
+ DRM_SYSCTL_PRINT(" total counts "
+ " | outstanding \n");
+ DRM_SYSCTL_PRINT("type alloc freed fail bytes freed"
+ " | allocs bytes\n\n");
+ DRM_SYSCTL_PRINT("%-9.9s %5d %5d %4d %10lu |\n",
+ "system", 0, 0, 0, DRM(ram_available));
+ DRM_SYSCTL_PRINT("%-9.9s %5d %5d %4d %10lu |\n",
+ "locked", 0, 0, 0, DRM(ram_used));
+ DRM_SYSCTL_PRINT("\n");
+ for (pt = DRM(mem_stats); pt->name; pt++) {
+ DRM_SYSCTL_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
+ pt->name,
+ pt->succeed_count,
+ pt->free_count,
+ pt->fail_count,
+ pt->bytes_allocated,
+ pt->bytes_freed,
+ pt->succeed_count - pt->free_count,
+ (long)pt->bytes_allocated
+ - (long)pt->bytes_freed);
+ }
+ SYSCTL_OUT(req, "", 1);
+
+ return 0;
+}
+
+int DRM(mem_info) DRM_SYSCTL_HANDLER_ARGS
+{
+ int ret;
+
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ ret = DRM(_mem_info)(oidp, arg1, arg2, req);
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ return ret;
+}
+
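+/* All core allocations funnel through these wrappers so the sysctl handler
+ * above can report per-area totals and so mismatches (more frees than
+ * allocations in an area) are flagged with DRM_MEM_ERROR.
+ */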
+void *DRM(alloc)(size_t size, int area)
+{
+ void *pt;
+
+ if (!size) {
+ DRM_MEM_ERROR(area, "Allocating 0 bytes\n");
+ return NULL;
+ }
+
+ if (!(pt = malloc(size, DRM(M_DRM), M_NOWAIT))) {
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ ++DRM(mem_stats)[area].fail_count;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ return NULL;
+ }
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ ++DRM(mem_stats)[area].succeed_count;
+ DRM(mem_stats)[area].bytes_allocated += size;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ return pt;
+}
+
+void *DRM(realloc)(void *oldpt, size_t oldsize, size_t size, int area)
+{
+ void *pt;
+
+ if (!(pt = DRM(alloc)(size, area))) return NULL;
+ if (oldpt && oldsize) {
+ memcpy(pt, oldpt, oldsize);
+ DRM(free)(oldpt, oldsize, area);
+ }
+ return pt;
+}
+
+char *DRM(strdup)(const char *s, int area)
+{
+ char *pt;
+ int length = s ? strlen(s) : 0;
+
+ if (!(pt = DRM(alloc)(length+1, area))) return NULL;
+ strcpy(pt, s);
+ return pt;
+}
+
+void DRM(strfree)(char *s, int area)
+{
+ unsigned int size;
+
+ if (!s) return;
+
+ size = 1 + strlen(s);
+ DRM(free)((void *)s, size, area);
+}
+
+void DRM(free)(void *pt, size_t size, int area)
+{
+ int alloc_count;
+ int free_count;
+
+ if (!pt) DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
+ else free(pt, DRM(M_DRM));
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ DRM(mem_stats)[area].bytes_freed += size;
+ free_count = ++DRM(mem_stats)[area].free_count;
+ alloc_count = DRM(mem_stats)[area].succeed_count;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ if (free_count > alloc_count) {
+ DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
+ free_count, alloc_count);
+ }
+}
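Note that the accounting above is purely byte-based per area: DRM(free) trusts the size the caller passes, so it must match the size given to DRM(alloc). A sketch of the expected pairing inside a hypothetical driver routine (buf_priv_t is made up for illustration; dev_private is the usual drm_device_t hook):

    /* Illustrative only: keep the size available so the DRM_MEM_DRIVER
     * counters stay balanced between attach and detach. */
    typedef struct buf_priv { int age; } buf_priv_t;

    static int example_attach(drm_device_t *dev)
    {
        buf_priv_t *priv = DRM(alloc)(sizeof(*priv), DRM_MEM_DRIVER);
        if (priv == NULL)
            return ENOMEM;          /* fail_count has already been bumped */
        priv->age = 0;
        dev->dev_private = priv;
        return 0;
    }

    static void example_detach(drm_device_t *dev)
    {
        DRM(free)(dev->dev_private, sizeof(buf_priv_t), DRM_MEM_DRIVER);
        dev->dev_private = NULL;
    }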
+
+unsigned long DRM(alloc_pages)(int order, int area)
+{
+ vm_offset_t address;
+ unsigned long bytes = PAGE_SIZE << order;
+
+
+ address = (vm_offset_t) contigmalloc(bytes, DRM(M_DRM), M_WAITOK, 0, ~0, 1, 0);
+ if (!address) {
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ ++DRM(mem_stats)[area].fail_count;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ return 0;
+ }
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ ++DRM(mem_stats)[area].succeed_count;
+ DRM(mem_stats)[area].bytes_allocated += bytes;
+ DRM(ram_used) += bytes;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+
+
+ /* Zero outside the lock */
+ memset((void *)address, 0, bytes);
+
+
+ return address;
+}
+
+void DRM(free_pages)(unsigned long address, int order, int area)
+{
+ unsigned long bytes = PAGE_SIZE << order;
+ int alloc_count;
+ int free_count;
+
+ if (!address) {
+ DRM_MEM_ERROR(area, "Attempt to free address 0\n");
+ } else {
+ contigfree((void *) address, bytes, DRM(M_DRM));
+ }
+
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ free_count = ++DRM(mem_stats)[area].free_count;
+ alloc_count = DRM(mem_stats)[area].succeed_count;
+ DRM(mem_stats)[area].bytes_freed += bytes;
+ DRM(ram_used) -= bytes;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ if (free_count > alloc_count) {
+ DRM_MEM_ERROR(area,
+ "Excess frees: %d frees, %d allocs\n",
+ free_count, alloc_count);
+ }
+}
+
+void *DRM(ioremap)(unsigned long offset, unsigned long size)
+{
+ void *pt;
+
+ if (!size) {
+ DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
+ "Mapping 0 bytes at 0x%08lx\n", offset);
+ return NULL;
+ }
+
+ if (!(pt = pmap_mapdev(offset, size))) {
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ return NULL;
+ }
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
+ DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ return pt;
+}
+
+void DRM(ioremapfree)(void *pt, unsigned long size)
+{
+ int alloc_count;
+ int free_count;
+
+ if (!pt)
+ DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
+ "Attempt to free NULL pointer\n");
+ else
+ pmap_unmapdev((vm_offset_t) pt, size);
+
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_freed += size;
+ free_count = ++DRM(mem_stats)[DRM_MEM_MAPPINGS].free_count;
+ alloc_count = DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ if (free_count > alloc_count) {
+ DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
+ "Excess frees: %d frees, %d allocs\n",
+ free_count, alloc_count);
+ }
+}
+
+#if __REALLY_HAVE_AGP
+agp_memory *DRM(alloc_agp)(int pages, u32 type)
+{
+ agp_memory *handle;
+
+ if (!pages) {
+ DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n");
+ return NULL;
+ }
+
+ if ((handle = DRM(agp_allocate_memory)(pages, type))) {
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ ++DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
+ DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_allocated
+ += pages << PAGE_SHIFT;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ return handle;
+ }
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ ++DRM(mem_stats)[DRM_MEM_TOTALAGP].fail_count;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ return NULL;
+}
+
+int DRM(free_agp)(agp_memory *handle, int pages)
+{
+ int alloc_count;
+ int free_count;
+
+ if (!handle) {
+ DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
+ "Attempt to free NULL AGP handle\n");
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ if (DRM(agp_free_memory)(handle)) {
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ free_count = ++DRM(mem_stats)[DRM_MEM_TOTALAGP].free_count;
+ alloc_count = DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
+ DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_freed
+ += pages << PAGE_SHIFT;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ if (free_count > alloc_count) {
+ DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
+ "Excess frees: %d frees, %d allocs\n",
+ free_count, alloc_count);
+ }
+ return 0;
+ }
+ DRM_OS_RETURN(EINVAL);
+}
+
+int DRM(bind_agp)(agp_memory *handle, unsigned int start)
+{
+ int retcode;
+ device_t dev = agp_find_device();
+ struct agp_memory_info info;
+
+ if (!dev)
+ return EINVAL;
+
+ if (!handle) {
+ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
+ "Attempt to bind NULL AGP handle\n");
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ if (!(retcode = DRM(agp_bind_memory)(handle, start))) {
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
+ agp_memory_info(dev, handle, &info);
+ DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_allocated
+ += info.ami_size;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ DRM_OS_RETURN(0);
+ }
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].fail_count;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ DRM_OS_RETURN(retcode);
+}
+
+int DRM(unbind_agp)(agp_memory *handle)
+{
+ int alloc_count;
+ int free_count;
+ int retcode = EINVAL;
+ device_t dev = agp_find_device();
+ struct agp_memory_info info;
+
+ if (!dev)
+ return EINVAL;
+
+ if (!handle) {
+ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
+ "Attempt to unbind NULL AGP handle\n");
+ DRM_OS_RETURN(retcode);
+ }
+
+ agp_memory_info(dev, handle, &info);
+
+ if ((retcode = DRM(agp_unbind_memory)(handle)))
+ DRM_OS_RETURN(retcode);
+
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ free_count = ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].free_count;
+ alloc_count = DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
+ DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_freed
+ += info.ami_size;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ if (free_count > alloc_count) {
+ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
+ "Excess frees: %d frees, %d allocs\n",
+ free_count, alloc_count);
+ }
+ DRM_OS_RETURN(retcode);
+}
+#endif
diff --git a/bsd-core/drm_os_freebsd.h b/bsd-core/drm_os_freebsd.h
new file mode 100644
index 00000000..72c5baf6
--- /dev/null
+++ b/bsd-core/drm_os_freebsd.h
@@ -0,0 +1,375 @@
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/stat.h>
+#include <sys/proc.h>
+#include <sys/lock.h>
+#include <sys/fcntl.h>
+#include <sys/uio.h>
+#include <sys/filio.h>
+#include <sys/sysctl.h>
+#include <sys/select.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <machine/pmap.h>
+#if __FreeBSD_version >= 500000
+#include <sys/selinfo.h>
+#endif
+#include <sys/bus.h>
+#if __FreeBSD_version >= 400005
+#include <sys/taskqueue.h>
+#endif
+#if __FreeBSD_version >= 500000
+#include <sys/mutex.h>
+#endif
+
+#if __FreeBSD_version >= 400006
+#define __REALLY_HAVE_AGP __HAVE_AGP
+#endif
+
+#define __REALLY_HAVE_MTRR 0
+#define __REALLY_HAVE_SG 0
+
+#if __REALLY_HAVE_AGP
+#include <pci/agpvar.h>
+#endif
+
+#define DRM_TIME_SLICE (hz/20) /* Time slice for GLXContexts */
+
+#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
+#define DRM_DEV_UID 0
+#define DRM_DEV_GID 0
+
+
+#if __FreeBSD_version >= 500000
+#define DRM_OS_SPINTYPE struct mtx
+#define DRM_OS_SPININIT(l,name) mtx_init(&l, name, MTX_DEF)
+#define DRM_OS_SPINLOCK(l) mtx_lock(l)
+#define DRM_OS_SPINUNLOCK(u)	mtx_unlock(u)
+#define DRM_OS_LOCK lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curthread)
+#define DRM_OS_UNLOCK lockmgr(&dev->dev_lock, LK_RELEASE, 0, curthread)
+#define DRM_OS_CURPROC curthread
+#define DRM_OS_STRUCTPROC struct thread
+#define DRM_OS_CURRENTPID curthread->td_proc->p_pid
+#define DRM_OS_IOCTL dev_t kdev, u_long cmd, caddr_t data, int flags, struct thread *p
+#define DRM_OS_CHECKSUSER suser(p->td_proc)
+#else
+#define DRM_OS_CURPROC curproc
+#define DRM_OS_STRUCTPROC struct proc
+#define DRM_OS_SPINTYPE struct simplelock
+#define DRM_OS_SPININIT(l,name) simple_lock_init(&l)
+#define DRM_OS_SPINLOCK(l) simple_lock(l)
+#define DRM_OS_SPINUNLOCK(u)	simple_unlock(u)
+#define DRM_OS_IOCTL dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p
+#define DRM_OS_LOCK lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc)
+#define DRM_OS_UNLOCK lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc)
+#define DRM_OS_CURRENTPID curproc->p_pid
+#define DRM_OS_CHECKSUSER suser(p)
+#endif
+
+#define DRM_OS_TASKQUEUE_ARGS void *dev, int pending
+#define DRM_OS_IRQ_ARGS void *device
+#define DRM_OS_DEVICE drm_device_t *dev = kdev->si_drv1
+#define DRM_OS_MALLOC(size) malloc( size, DRM(M_DRM), M_NOWAIT )
+#define DRM_OS_FREE(pt) free( pt, DRM(M_DRM) )
+#define DRM_OS_VTOPHYS(addr) vtophys(addr)
+
+#define DRM_OS_PRIV \
+ drm_file_t *priv = (drm_file_t *) DRM(find_file_by_proc)(dev, p); \
+ if (!priv) { \
+ DRM_DEBUG("can't find authenticator\n"); \
+ return EINVAL; \
+ }
+
+#define DRM_OS_DELAY( udelay ) \
+do { \
+ struct timeval tv1, tv2; \
+ microtime(&tv1); \
+ do { \
+ microtime(&tv2); \
+ } \
+ while (((tv2.tv_sec-tv1.tv_sec)*1000000 + tv2.tv_usec - tv1.tv_usec) < udelay ); \
+} while (0)
+
+#define DRM_OS_RETURN(v) return v;
+
+
+#define DRM_OS_KRNTOUSR(arg1, arg2, arg3) \
+ *arg1 = arg2
+#define DRM_OS_KRNFROMUSR(arg1, arg2, arg3) \
+ arg1 = *arg2
+#define DRM_OS_COPYTOUSR(arg1, arg2, arg3) \
+ copyout(arg2, arg1, arg3)
+#define DRM_OS_COPYFROMUSR(arg1, arg2, arg3) \
+ copyin(arg2, arg1, arg3)
+
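These wrappers are what keep the shared ioctl code identical across the Linux and BSD trees: DRM_OS_IOCTL supplies the handler signature, DRM_OS_DEVICE recovers the per-device softc, and KRNFROMUSR/KRNTOUSR move arguments that the BSD cdev layer has already copied into kernel space. A hedged sketch of a handler written only against them (drm_example_t and the ioctl itself are invented for illustration):

    /* Hypothetical ioctl using only the wrappers defined above. */
    typedef struct drm_example { int value; } drm_example_t;

    int DRM(example_ioctl)( DRM_OS_IOCTL )
    {
        DRM_OS_DEVICE;                          /* drm_device_t *dev = kdev->si_drv1 */
        drm_example_t arg;

        DRM_OS_KRNFROMUSR( arg, (drm_example_t *)data, sizeof(arg) );
        arg.value = dev->unique_len;            /* any per-device field would do */
        DRM_OS_KRNTOUSR( (drm_example_t *)data, arg, sizeof(arg) );

        DRM_OS_RETURN(0);
    }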
+#define DRM_OS_READMEMORYBARRIER \
+do { \
+ int xchangeDummy; \
+ DRM_DEBUG("%s\n", __FUNCTION__); \
+ __asm__ volatile(" push %%eax ; xchg %%eax, %0 ; pop %%eax" : : "m" (xchangeDummy)); \
+ __asm__ volatile(" push %%eax ; push %%ebx ; push %%ecx ; push %%edx ;" \
+ " movl $0,%%eax ; cpuid ; pop %%edx ; pop %%ecx ; pop %%ebx ;" \
+ " pop %%eax" : /* no outputs */ : /* no inputs */ ); \
+} while (0)
+
+#define DRM_OS_WRITEMEMORYBARRIER DRM_OS_READMEMORYBARRIER
+
+#define DRM_OS_WAKEUP(w) wakeup(w)
+#define DRM_OS_WAKEUP_INT(w) wakeup(w)
+
+#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) /* FreeBSD's PAGE_MASK is PAGE_SIZE-1, unlike Linux */
+
+#define malloctype DRM(M_DRM)
+/* The macro names conflicted with MALLOC_DECLARE/MALLOC_DEFINE, hence this indirection */
+MALLOC_DECLARE(malloctype);
+#undef malloctype
+
+typedef struct drm_chipinfo
+{
+ int vendor;
+ int device;
+ int supported;
+ char *name;
+} drm_chipinfo_t;
+
+typedef unsigned long atomic_t;
+typedef u_int32_t cycles_t;
+typedef u_int32_t spinlock_t;
+typedef u_int32_t u32;
+typedef u_int16_t u16;
+typedef u_int8_t u8;
+#define atomic_set(p, v) (*(p) = (v))
+#define atomic_read(p) (*(p))
+#define atomic_inc(p) atomic_add_long(p, 1)
+#define atomic_dec(p) atomic_subtract_long(p, 1)
+#define atomic_add(n, p) atomic_add_long(p, n)
+#define atomic_sub(n, p) atomic_subtract_long(p, n)
+
+/* Fake this */
+static __inline unsigned int
+test_and_set_bit(int b, volatile unsigned long *p)
+{
+ int s = splhigh();
+ unsigned int m = 1<<b;
+ unsigned int r = *p & m;
+ *p |= m;
+ splx(s);
+ return r;
+}
+
+static __inline void
+clear_bit(int b, volatile unsigned long *p)
+{
+ atomic_clear_long(p + (b >> 5), 1 << (b & 0x1f));
+}
+
+static __inline void
+set_bit(int b, volatile unsigned long *p)
+{
+ atomic_set_long(p + (b >> 5), 1 << (b & 0x1f));
+}
+
+static __inline int
+test_bit(int b, volatile unsigned long *p)
+{
+ return p[b >> 5] & (1 << (b & 0x1f));
+}
+
+static __inline int
+find_first_zero_bit(volatile unsigned long *p, int max)
+{
+ int b;
+
+ for (b = 0; b < max; b += 32) {
+ if (p[b >> 5] != ~0) {
+ for (;;) {
+ if ((p[b >> 5] & (1 << (b & 0x1f))) == 0)
+ return b;
+ b++;
+ }
+ }
+ }
+ return max;
+}
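These helpers mimic the Linux bitops over arrays of 32-bit words (hence the b >> 5 and b & 0x1f indexing), which is how the shared context-bitmap code probes and claims slots. A small sketch of that pattern (the bitmap and function names are illustrative):

    /* Illustrative slot allocator over a 64-entry bitmap; each array element
     * contributes 32 usable bits to the helpers above. */
    #define EXAMPLE_SLOTS	64
    static unsigned long example_bitmap[EXAMPLE_SLOTS / 32];

    static int example_alloc_slot(void)
    {
        int slot = find_first_zero_bit(example_bitmap, EXAMPLE_SLOTS);
        if (slot == EXAMPLE_SLOTS)
            return -1;                  /* all slots busy */
        set_bit(slot, example_bitmap);  /* claim it */
        return slot;
    }

    static void example_free_slot(int slot)
    {
        clear_bit(slot, example_bitmap);
    }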
+
+#define spldrm() spltty()
+
+#define memset(p, v, s) bzero(p, s)
+
+/*
+ * Fake out the module macros for versions of FreeBSD where they don't
+ * exist.
+ */
+#if (__FreeBSD_version < 500002 && __FreeBSD_version > 500000) || __FreeBSD_version < 420000
+/* FIXME: again, what's the exact date? */
+#define MODULE_VERSION(a,b) struct __hack
+#define MODULE_DEPEND(a,b,c,d,e) struct __hack
+
+#endif
+
+#define __drm_dummy_lock(lock) (*(__volatile__ unsigned int *)lock)
+#define _DRM_CAS(lock,old,new,__ret) \
+ do { \
+ int __dummy; /* Can't mark eax as clobbered */ \
+ __asm__ __volatile__( \
+ "lock ; cmpxchg %4,%1\n\t" \
+ "setnz %0" \
+ : "=d" (__ret), \
+ "=m" (__drm_dummy_lock(lock)), \
+ "=a" (__dummy) \
+ : "2" (old), \
+ "r" (new)); \
+ } while (0)
+
+/* Redefinitions to make templating easy */
+#define wait_queue_head_t long
+#define agp_memory void
+#define jiffies ticks
+
+ /* Macros to make printf easier */
+#define DRM_ERROR(fmt, arg...) \
+ printf("error: " "[" DRM_NAME ":" __FUNCTION__ "] *ERROR* " fmt , ##arg)
+#define DRM_MEM_ERROR(area, fmt, arg...) \
+ printf("error: " "[" DRM_NAME ":" __FUNCTION__ ":%s] *ERROR* " fmt , \
+ DRM(mem_stats)[area].name , ##arg)
+#define DRM_INFO(fmt, arg...) printf("info: " "[" DRM_NAME "] " fmt , ##arg)
+
+#if DRM_DEBUG_CODE
+#define DRM_DEBUG(fmt, arg...) \
+ do { \
+ if (DRM(flags) & DRM_FLAG_DEBUG) \
+ printf("[" DRM_NAME ":" __FUNCTION__ "] " fmt , \
+ ##arg); \
+ } while (0)
+#else
+#define DRM_DEBUG(fmt, arg...) do { } while (0)
+#endif
+
+#define DRM_PROC_LIMIT (PAGE_SIZE-80)
+
+#if (__FreeBSD_version >= 500000) || ((__FreeBSD_version < 500000) && (__FreeBSD_version >= 410002))
+#define DRM_SYSCTL_HANDLER_ARGS (SYSCTL_HANDLER_ARGS)
+#else
+#define DRM_SYSCTL_HANDLER_ARGS SYSCTL_HANDLER_ARGS
+#endif
+
+#define DRM_SYSCTL_PRINT(fmt, arg...) \
+ snprintf(buf, sizeof(buf), fmt, ##arg); \
+ error = SYSCTL_OUT(req, buf, strlen(buf)); \
+ if (error) return error;
+
+#define DRM_SYSCTL_PRINT_RET(ret, fmt, arg...) \
+ snprintf(buf, sizeof(buf), fmt, ##arg); \
+ error = SYSCTL_OUT(req, buf, strlen(buf)); \
+ if (error) { ret; return error; }
+
+
+#define DRM_FIND_MAP(dest, o) \
+ do { \
+ drm_map_list_entry_t *listentry; \
+ TAILQ_FOREACH(listentry, dev->maplist, link) { \
+ if ( listentry->map->offset == o ) { \
+ dest = listentry->map; \
+ break; \
+ } \
+ } \
+ } while (0)
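DRM_FIND_MAP is the BSD stand-in for the Linux hash lookup: it walks dev->maplist with TAILQ_FOREACH and stores the first entry whose offset matches into its first argument, leaving it untouched when nothing matches. A hypothetical call site therefore initializes the destination and checks it afterwards (sarea_offset is illustrative; dev is assumed to be in scope):

    /* Sketch of how shared driver code is expected to use DRM_FIND_MAP. */
    drm_map_t *map = NULL;

    DRM_FIND_MAP( map, sarea_offset );
    if ( map == NULL ) {
        DRM_ERROR( "could not find map at 0x%08lx\n", sarea_offset );
        DRM_OS_RETURN(EINVAL);
    }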
+
+
+/* Internal functions */
+
+/* drm_drv.h */
+extern d_ioctl_t DRM(ioctl);
+extern d_ioctl_t DRM(lock);
+extern d_ioctl_t DRM(unlock);
+extern d_open_t DRM(open);
+extern d_close_t DRM(close);
+extern d_read_t DRM(read);
+extern d_write_t DRM(write);
+extern d_poll_t DRM(poll);
+extern d_mmap_t DRM(mmap);
+extern int DRM(open_helper)(dev_t kdev, int flags, int fmt,
+ DRM_OS_STRUCTPROC *p, drm_device_t *dev);
+extern drm_file_t *DRM(find_file_by_proc)(drm_device_t *dev,
+ DRM_OS_STRUCTPROC *p);
+
+/* Misc. IOCTL support (drm_ioctl.h) */
+extern d_ioctl_t DRM(irq_busid);
+extern d_ioctl_t DRM(getunique);
+extern d_ioctl_t DRM(setunique);
+extern d_ioctl_t DRM(getmap);
+extern d_ioctl_t DRM(getclient);
+extern d_ioctl_t DRM(getstats);
+
+/* Context IOCTL support (drm_context.h) */
+extern d_ioctl_t DRM(resctx);
+extern d_ioctl_t DRM(addctx);
+extern d_ioctl_t DRM(modctx);
+extern d_ioctl_t DRM(getctx);
+extern d_ioctl_t DRM(switchctx);
+extern d_ioctl_t DRM(newctx);
+extern d_ioctl_t DRM(rmctx);
+extern d_ioctl_t DRM(setsareactx);
+extern d_ioctl_t DRM(getsareactx);
+
+/* Drawable IOCTL support (drm_drawable.h) */
+extern d_ioctl_t DRM(adddraw);
+extern d_ioctl_t DRM(rmdraw);
+
+/* Authentication IOCTL support (drm_auth.h) */
+extern d_ioctl_t DRM(getmagic);
+extern d_ioctl_t DRM(authmagic);
+
+/* Locking IOCTL support (drm_lock.h) */
+extern d_ioctl_t DRM(block);
+extern d_ioctl_t DRM(unblock);
+extern d_ioctl_t DRM(finish);
+
+/* Buffer management support (drm_bufs.h) */
+extern d_ioctl_t DRM(addmap);
+extern d_ioctl_t DRM(rmmap);
+#if __HAVE_DMA
+extern d_ioctl_t DRM(addbufs_agp);
+extern d_ioctl_t DRM(addbufs_pci);
+extern d_ioctl_t DRM(addbufs_sg);
+extern d_ioctl_t DRM(addbufs);
+extern d_ioctl_t DRM(infobufs);
+extern d_ioctl_t DRM(markbufs);
+extern d_ioctl_t DRM(freebufs);
+extern d_ioctl_t DRM(mapbufs);
+#endif
+
+/* Memory management support (drm_memory.h) */
+extern int DRM(mem_info) DRM_SYSCTL_HANDLER_ARGS;
+
+/* DMA support (drm_dma.h) */
+#if __HAVE_DMA_IRQ
+extern d_ioctl_t DRM(control);
+#endif
+
+/* AGP/GART support (drm_agpsupport.h) */
+#if __REALLY_HAVE_AGP
+extern d_ioctl_t DRM(agp_acquire);
+extern d_ioctl_t DRM(agp_release);
+extern d_ioctl_t DRM(agp_enable);
+extern d_ioctl_t DRM(agp_info);
+extern d_ioctl_t DRM(agp_alloc);
+extern d_ioctl_t DRM(agp_free);
+extern d_ioctl_t DRM(agp_unbind);
+extern d_ioctl_t DRM(agp_bind);
+#endif
+
+/* Scatter Gather Support (drm_scatter.h) */
+#if __HAVE_SG
+extern d_ioctl_t DRM(sg_alloc);
+extern d_ioctl_t DRM(sg_free);
+#endif
+
+/* SysCtl Support (drm_sysctl.h) */
+extern int DRM(sysctl_init)(drm_device_t *dev);
+extern int DRM(sysctl_cleanup)(drm_device_t *dev);
diff --git a/bsd-core/drm_scatter.c b/bsd-core/drm_scatter.c
new file mode 100644
index 00000000..a6b8275f
--- /dev/null
+++ b/bsd-core/drm_scatter.c
@@ -0,0 +1,237 @@
+/* drm_scatter.c -- IOCTLs to manage scatter/gather memory -*- linux-c -*-
+ * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#define __NO_VERSION__
+#include <linux/config.h>
+#include <linux/vmalloc.h>
+#include "drmP.h"
+
+#define DEBUG_SCATTER 0
+
+void DRM(sg_cleanup)( drm_sg_mem_t *entry )
+{
+ struct page *page;
+ int i;
+
+ for ( i = 0 ; i < entry->pages ; i++ ) {
+ page = entry->pagelist[i];
+ if ( page )
+ ClearPageReserved( page );
+ }
+
+ vfree( entry->virtual );
+
+ DRM(free)( entry->busaddr,
+ entry->pages * sizeof(*entry->busaddr),
+ DRM_MEM_PAGES );
+ DRM(free)( entry->pagelist,
+ entry->pages * sizeof(*entry->pagelist),
+ DRM_MEM_PAGES );
+ DRM(free)( entry,
+ sizeof(*entry),
+ DRM_MEM_SGLISTS );
+}
+
+int DRM(sg_alloc)( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg )
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_scatter_gather_t request;
+ drm_sg_mem_t *entry;
+ unsigned long pages, i, j;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+ if ( dev->sg )
+ return -EINVAL;
+
+ if ( copy_from_user( &request,
+ (drm_scatter_gather_t *)arg,
+ sizeof(request) ) )
+ return -EFAULT;
+
+ entry = DRM(alloc)( sizeof(*entry), DRM_MEM_SGLISTS );
+ if ( !entry )
+ return -ENOMEM;
+
+ memset( entry, 0, sizeof(*entry) );
+
+ pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
+ DRM_DEBUG( "sg size=%ld pages=%ld\n", request.size, pages );
+
+ entry->pages = pages;
+ entry->pagelist = DRM(alloc)( pages * sizeof(*entry->pagelist),
+ DRM_MEM_PAGES );
+ if ( !entry->pagelist ) {
+ DRM(free)( entry, sizeof(*entry), DRM_MEM_SGLISTS );
+ return -ENOMEM;
+ }
+
+ entry->busaddr = DRM(alloc)( pages * sizeof(*entry->busaddr),
+ DRM_MEM_PAGES );
+ if ( !entry->busaddr ) {
+ DRM(free)( entry->pagelist,
+ entry->pages * sizeof(*entry->pagelist),
+ DRM_MEM_PAGES );
+ DRM(free)( entry,
+ sizeof(*entry),
+ DRM_MEM_SGLISTS );
+ return -ENOMEM;
+ }
+ memset( (void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr) );
+
+ entry->virtual = vmalloc_32( pages << PAGE_SHIFT );
+ if ( !entry->virtual ) {
+ DRM(free)( entry->busaddr,
+ entry->pages * sizeof(*entry->busaddr),
+ DRM_MEM_PAGES );
+ DRM(free)( entry->pagelist,
+ entry->pages * sizeof(*entry->pagelist),
+ DRM_MEM_PAGES );
+ DRM(free)( entry,
+ sizeof(*entry),
+ DRM_MEM_SGLISTS );
+ return -ENOMEM;
+ }
+
+ /* This also forces the mapping of COW pages, so our page list
+ * will be valid. Please don't remove it...
+ */
+ memset( entry->virtual, 0, pages << PAGE_SHIFT );
+
+ entry->handle = (unsigned long)entry->virtual;
+
+ DRM_DEBUG( "sg alloc handle = %08lx\n", entry->handle );
+ DRM_DEBUG( "sg alloc virtual = %p\n", entry->virtual );
+
+ for ( i = entry->handle, j = 0 ; j < pages ; i += PAGE_SIZE, j++ ) {
+ pgd = pgd_offset_k( i );
+ if ( !pgd_present( *pgd ) )
+ goto failed;
+
+ pmd = pmd_offset( pgd, i );
+ if ( !pmd_present( *pmd ) )
+ goto failed;
+
+ pte = pte_offset( pmd, i );
+ if ( !pte_present( *pte ) )
+ goto failed;
+
+ entry->pagelist[j] = pte_page( *pte );
+
+ SetPageReserved( entry->pagelist[j] );
+ }
+
+ request.handle = entry->handle;
+
+ if ( copy_to_user( (drm_scatter_gather_t *)arg,
+ &request,
+ sizeof(request) ) ) {
+ DRM(sg_cleanup)( entry );
+ return -EFAULT;
+ }
+
+ dev->sg = entry;
+
+#if DEBUG_SCATTER
+ /* Verify that each page points to its virtual address, and vice
+ * versa.
+ */
+ {
+ int error = 0;
+
+ for ( i = 0 ; i < pages ; i++ ) {
+ unsigned long *tmp;
+
+ tmp = page_address( entry->pagelist[i] );
+ for ( j = 0 ;
+ j < PAGE_SIZE / sizeof(unsigned long) ;
+ j++, tmp++ ) {
+ *tmp = 0xcafebabe;
+ }
+ tmp = (unsigned long *)((u8 *)entry->virtual +
+ (PAGE_SIZE * i));
+ for( j = 0 ;
+ j < PAGE_SIZE / sizeof(unsigned long) ;
+ j++, tmp++ ) {
+ if ( *tmp != 0xcafebabe && error == 0 ) {
+ error = 1;
+ DRM_ERROR( "Scatter allocation error, "
+ "pagelist does not match "
+ "virtual mapping\n" );
+ }
+ }
+ tmp = page_address( entry->pagelist[i] );
+ for(j = 0 ;
+ j < PAGE_SIZE / sizeof(unsigned long) ;
+ j++, tmp++) {
+ *tmp = 0;
+ }
+ }
+ if (error == 0)
+ DRM_ERROR( "Scatter allocation matches pagelist\n" );
+ }
+#endif
+
+ return 0;
+
+ failed:
+ DRM(sg_cleanup)( entry );
+ return -ENOMEM;
+}
+
+int DRM(sg_free)( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg )
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_scatter_gather_t request;
+ drm_sg_mem_t *entry;
+
+ if ( copy_from_user( &request,
+ (drm_scatter_gather_t *)arg,
+ sizeof(request) ) )
+ return -EFAULT;
+
+ entry = dev->sg;
+ dev->sg = NULL;
+
+ if ( !entry || entry->handle != request.handle )
+ return -EINVAL;
+
+ DRM_DEBUG( "sg free virtual = %p\n", entry->virtual );
+
+ DRM(sg_cleanup)( entry );
+
+ return 0;
+}
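From userland the round trip is an ioctl pair on the device node; the request names below (DRM_IOCTL_SG_ALLOC / DRM_IOCTL_SG_FREE) are assumed to be the drm.h definitions that route to DRM(sg_alloc)/DRM(sg_free), since they are not visible in this hunk:

    /* Hypothetical userland use of the scatter/gather ioctls. */
    #include <sys/ioctl.h>
    #include "drm.h"

    static int example_sg_setup(int fd, unsigned long bytes, drm_scatter_gather_t *sg)
    {
        sg->size = bytes;               /* the kernel rounds up to whole pages */
        sg->handle = 0;
        if (ioctl(fd, DRM_IOCTL_SG_ALLOC, sg) == -1)    /* assumed request name */
            return -1;
        /* sg->handle now identifies the backing memory; a client would normally
         * follow up with DRM_IOCTL_ADD_MAP of type _DRM_SCATTER_GATHER. */
        return 0;
    }

    static void example_sg_teardown(int fd, drm_scatter_gather_t *sg)
    {
        (void) ioctl(fd, DRM_IOCTL_SG_FREE, sg);        /* assumed request name */
    }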
diff --git a/bsd-core/drm_sysctl.c b/bsd-core/drm_sysctl.c
new file mode 100644
index 00000000..02e4b28d
--- /dev/null
+++ b/bsd-core/drm_sysctl.c
@@ -0,0 +1,523 @@
+SYSCTL_NODE(_hw, OID_AUTO, dri, CTLFLAG_RW, 0, "DRI Graphics");
+
+static int DRM(name_info)DRM_SYSCTL_HANDLER_ARGS;
+static int DRM(vm_info)DRM_SYSCTL_HANDLER_ARGS;
+static int DRM(clients_info)DRM_SYSCTL_HANDLER_ARGS;
+static int DRM(queues_info)DRM_SYSCTL_HANDLER_ARGS;
+static int DRM(bufs_info)DRM_SYSCTL_HANDLER_ARGS;
+#if DRM_DEBUG_CODExx
+static int DRM(vma_info)DRM_SYSCTL_HANDLER_ARGS;
+#endif
+#if DRM_DMA_HISTOGRAM
+static int DRM(histo_info)DRM_SYSCTL_HANDLER_ARGS;
+#endif
+
+struct DRM(sysctl_list) {
+ const char *name;
+ int (*f) DRM_SYSCTL_HANDLER_ARGS;
+} DRM(sysctl_list)[] = {
+ { "name", DRM(name_info) },
+ { "mem", DRM(mem_info) },
+ { "vm", DRM(vm_info) },
+ { "clients", DRM(clients_info) },
+ { "queues", DRM(queues_info) },
+ { "bufs", DRM(bufs_info) },
+#if DRM_DEBUG_CODExx
+ { "vma", DRM(vma_info) },
+#endif
+#if DRM_DMA_HISTOGRAM
+	{ "histo",    DRM(histo_info) },
+#endif
+};
+#define DRM_SYSCTL_ENTRIES (sizeof(DRM(sysctl_list))/sizeof(DRM(sysctl_list)[0]))
+
+struct drm_sysctl_info {
+ struct sysctl_oid oids[DRM_SYSCTL_ENTRIES + 1];
+ struct sysctl_oid_list list;
+ char name[2];
+};
+
+int DRM(sysctl_init)(drm_device_t *dev)
+{
+ struct drm_sysctl_info *info;
+ struct sysctl_oid *oid;
+ struct sysctl_oid *top;
+ int i;
+
+	/* Find the next free slot under hw.dri */
+ i = 0;
+ SLIST_FOREACH(oid, &sysctl__hw_dri_children, oid_link) {
+ if (i <= oid->oid_arg2)
+ i = oid->oid_arg2 + 1;
+ }
+
+	info = DRM(alloc)(sizeof *info, DRM_MEM_DRIVER);
+	if (info == NULL)
+		return ENOMEM;
+	dev->sysctl = info;
+
+	/* Construct the node under hw.dri */
+ info->name[0] = '0' + i;
+ info->name[1] = 0;
+ oid = &info->oids[DRM_SYSCTL_ENTRIES];
+ bzero(oid, sizeof(*oid));
+ oid->oid_parent = &sysctl__hw_dri_children;
+ oid->oid_number = OID_AUTO;
+ oid->oid_kind = CTLTYPE_NODE | CTLFLAG_RW;
+ oid->oid_arg1 = &info->list;
+ oid->oid_arg2 = i;
+ oid->oid_name = info->name;
+ oid->oid_handler = 0;
+ oid->oid_fmt = "N";
+ SLIST_INIT(&info->list);
+ sysctl_register_oid(oid);
+ top = oid;
+
+ for (i = 0; i < DRM_SYSCTL_ENTRIES; i++) {
+ oid = &info->oids[i];
+ bzero(oid, sizeof(*oid));
+ oid->oid_parent = top->oid_arg1;
+ oid->oid_number = OID_AUTO;
+ oid->oid_kind = CTLTYPE_INT | CTLFLAG_RD;
+ oid->oid_arg1 = dev;
+ oid->oid_arg2 = 0;
+ oid->oid_name = DRM(sysctl_list)[i].name;
+		oid->oid_handler = DRM(sysctl_list)[i].f;
+ oid->oid_fmt = "A";
+ sysctl_register_oid(oid);
+ }
+
+ return 0;
+}
+
+int DRM(sysctl_cleanup)(drm_device_t *dev)
+{
+ int i;
+
+ DRM_DEBUG("dev->sysctl=%p\n", dev->sysctl);
+ for (i = 0; i < DRM_SYSCTL_ENTRIES + 1; i++)
+ sysctl_unregister_oid(&dev->sysctl->oids[i]);
+
+ DRM(free)(dev->sysctl, sizeof *dev->sysctl, DRM_MEM_DRIVER);
+ dev->sysctl = NULL;
+
+ return 0;
+}
+
+static int DRM(name_info)DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ char buf[128];
+ int error;
+
+ if (dev->unique) {
+ DRM_SYSCTL_PRINT("%s 0x%x %s\n",
+ dev->name, dev2udev(dev->devnode), dev->unique);
+ } else {
+ DRM_SYSCTL_PRINT("%s 0x%x\n", dev->name, dev2udev(dev->devnode));
+ }
+
+ SYSCTL_OUT(req, "", 1);
+
+ return 0;
+}
+
+static int DRM(_vm_info)DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ drm_map_t *map;
+ drm_map_list_entry_t *listentry;
+ const char *types[] = { "FB", "REG", "SHM" };
+ const char *type;
+ int i=0;
+ char buf[128];
+ int error;
+
+ DRM_SYSCTL_PRINT("slot offset size type flags "
+ "address mtrr\n\n");
+
+ if (dev->maplist != NULL) {
+ TAILQ_FOREACH(listentry, dev->maplist, link) {
+ map = listentry->map;
+ if (map->type < 0 || map->type > 2) type = "??";
+ else type = types[map->type];
+ DRM_SYSCTL_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
+ i,
+ map->offset,
+ map->size,
+ type,
+ map->flags,
+ (unsigned long)map->handle);
+ if (map->mtrr < 0) {
+ DRM_SYSCTL_PRINT("none\n");
+ } else {
+ DRM_SYSCTL_PRINT("%4d\n", map->mtrr);
+ }
+ i++;
+ }
+ }
+ SYSCTL_OUT(req, "", 1);
+
+ return 0;
+}
+
+static int DRM(vm_info)DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ int ret;
+
+ DRM_OS_LOCK;
+ ret = DRM(_vm_info)(oidp, arg1, arg2, req);
+ DRM_OS_UNLOCK;
+
+ return ret;
+}
+
+
+static int DRM(_queues_info)DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ int i;
+ drm_queue_t *q;
+ char buf[128];
+ int error;
+
+ DRM_SYSCTL_PRINT(" ctx/flags use fin"
+ " blk/rw/rwf wait flushed queued"
+ " locks\n\n");
+ for (i = 0; i < dev->queue_count; i++) {
+ q = dev->queuelist[i];
+ atomic_inc(&q->use_count);
+ DRM_SYSCTL_PRINT_RET(atomic_dec(&q->use_count),
+ "%5d/0x%03x %5ld %5ld"
+ " %5ld/%c%c/%c%c%c %5d %10ld %10ld %10ld\n",
+ i,
+ q->flags,
+ atomic_read(&q->use_count),
+ atomic_read(&q->finalization),
+ atomic_read(&q->block_count),
+ atomic_read(&q->block_read) ? 'r' : '-',
+ atomic_read(&q->block_write) ? 'w' : '-',
+ q->read_queue ? 'r':'-',
+ q->write_queue ? 'w':'-',
+ q->flush_queue ? 'f':'-',
+ DRM_BUFCOUNT(&q->waitlist),
+ atomic_read(&q->total_flushed),
+ atomic_read(&q->total_queued),
+ atomic_read(&q->total_locks));
+ atomic_dec(&q->use_count);
+ }
+
+ SYSCTL_OUT(req, "", 1);
+ return 0;
+}
+
+static int DRM(queues_info) DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ int ret;
+
+ DRM_OS_LOCK;
+ ret = DRM(_queues_info)(oidp, arg1, arg2, req);
+ DRM_OS_UNLOCK;
+ return ret;
+}
+
+/* drm_bufs_info is called whenever a process reads
+ hw.dri.0.bufs. */
+
+static int DRM(_bufs_info) DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ drm_device_dma_t *dma = dev->dma;
+ int i;
+ char buf[128];
+ int error;
+
+ if (!dma) return 0;
+ DRM_SYSCTL_PRINT(" o size count free segs pages kB\n\n");
+ for (i = 0; i <= DRM_MAX_ORDER; i++) {
+ if (dma->bufs[i].buf_count)
+ DRM_SYSCTL_PRINT("%2d %8d %5d %5ld %5d %5d %5d\n",
+ i,
+ dma->bufs[i].buf_size,
+ dma->bufs[i].buf_count,
+ atomic_read(&dma->bufs[i]
+ .freelist.count),
+ dma->bufs[i].seg_count,
+ dma->bufs[i].seg_count
+ *(1 << dma->bufs[i].page_order),
+ (dma->bufs[i].seg_count
+ * (1 << dma->bufs[i].page_order))
+ * PAGE_SIZE / 1024);
+ }
+ DRM_SYSCTL_PRINT("\n");
+ for (i = 0; i < dma->buf_count; i++) {
+ if (i && !(i%32)) DRM_SYSCTL_PRINT("\n");
+ DRM_SYSCTL_PRINT(" %d", dma->buflist[i]->list);
+ }
+ DRM_SYSCTL_PRINT("\n");
+
+ SYSCTL_OUT(req, "", 1);
+ return 0;
+}
+
+static int DRM(bufs_info) DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ int ret;
+
+ DRM_OS_LOCK;
+ ret = DRM(_bufs_info)(oidp, arg1, arg2, req);
+ DRM_OS_UNLOCK;
+ return ret;
+}
+
+
+static int DRM(_clients_info) DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ drm_file_t *priv;
+ char buf[128];
+ int error;
+
+ DRM_SYSCTL_PRINT("a dev pid uid magic ioctls\n\n");
+ TAILQ_FOREACH(priv, &dev->files, link) {
+ DRM_SYSCTL_PRINT("%c %3d %5d %5d %10u %10lu\n",
+ priv->authenticated ? 'y' : 'n',
+ priv->minor,
+ priv->pid,
+ priv->uid,
+ priv->magic,
+ priv->ioctl_count);
+ }
+
+ SYSCTL_OUT(req, "", 1);
+ return 0;
+}
+
+static int DRM(clients_info)DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ int ret;
+
+ DRM_OS_LOCK;
+ ret = DRM(_clients_info)(oidp, arg1, arg2, req);
+ DRM_OS_UNLOCK;
+ return ret;
+}
+
+#if DRM_DEBUG_CODExx
+
+static int DRM(_vma_info)DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ drm_vma_entry_t *pt;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long i;
+ struct vm_area_struct *vma;
+ unsigned long address;
+#if defined(__i386__)
+ unsigned int pgprot;
+#endif
+ char buf[128];
+ int error;
+
+ DRM_SYSCTL_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
+ atomic_read(&dev->vma_count),
+ high_memory, virt_to_phys(high_memory));
+ for (pt = dev->vmalist; pt; pt = pt->next) {
+ if (!(vma = pt->vma)) continue;
+ DRM_SYSCTL_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx",
+ pt->pid,
+ vma->vm_start,
+ vma->vm_end,
+ vma->vm_flags & VM_READ ? 'r' : '-',
+ vma->vm_flags & VM_WRITE ? 'w' : '-',
+ vma->vm_flags & VM_EXEC ? 'x' : '-',
+ vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
+ vma->vm_flags & VM_LOCKED ? 'l' : '-',
+ vma->vm_flags & VM_IO ? 'i' : '-',
+ vma->vm_offset );
+#if defined(__i386__)
+ pgprot = pgprot_val(vma->vm_page_prot);
+ DRM_SYSCTL_PRINT(" %c%c%c%c%c%c%c%c%c",
+ pgprot & _PAGE_PRESENT ? 'p' : '-',
+ pgprot & _PAGE_RW ? 'w' : 'r',
+ pgprot & _PAGE_USER ? 'u' : 's',
+ pgprot & _PAGE_PWT ? 't' : 'b',
+ pgprot & _PAGE_PCD ? 'u' : 'c',
+ pgprot & _PAGE_ACCESSED ? 'a' : '-',
+ pgprot & _PAGE_DIRTY ? 'd' : '-',
+ pgprot & _PAGE_4M ? 'm' : 'k',
+ pgprot & _PAGE_GLOBAL ? 'g' : 'l' );
+#endif
+ DRM_SYSCTL_PRINT("\n");
+ for (i = vma->vm_start; i < vma->vm_end; i += PAGE_SIZE) {
+ pgd = pgd_offset(vma->vm_mm, i);
+ pmd = pmd_offset(pgd, i);
+ pte = pte_offset(pmd, i);
+ if (pte_present(*pte)) {
+ address = __pa(pte_page(*pte))
+ + (i & (PAGE_SIZE-1));
+ DRM_SYSCTL_PRINT(" 0x%08lx -> 0x%08lx"
+ " %c%c%c%c%c\n",
+ i,
+ address,
+ pte_read(*pte) ? 'r' : '-',
+ pte_write(*pte) ? 'w' : '-',
+ pte_exec(*pte) ? 'x' : '-',
+ pte_dirty(*pte) ? 'd' : '-',
+ pte_young(*pte) ? 'a' : '-' );
+ } else {
+ DRM_SYSCTL_PRINT(" 0x%08lx\n", i);
+ }
+ }
+ }
+
+ SYSCTL_OUT(req, "", 1);
+ return 0;
+}
+
+static int DRM(vma_info)DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ int ret;
+
+ DRM_OS_LOCK;
+ ret = DRM(_vma_info)(oidp, arg1, arg2, req);
+ DRM_OS_UNLOCK;
+ return ret;
+}
+#endif
+
+
+#if DRM_DMA_HISTOGRAM
+static int DRM(_histo_info)DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ drm_device_dma_t *dma = dev->dma;
+ int i;
+ unsigned long slot_value = DRM_DMA_HISTOGRAM_INITIAL;
+ unsigned long prev_value = 0;
+ drm_buf_t *buffer;
+ char buf[128];
+ int error;
+
+ DRM_SYSCTL_PRINT("general statistics:\n");
+ DRM_SYSCTL_PRINT("total %10u\n", atomic_read(&dev->histo.total));
+ DRM_SYSCTL_PRINT("open %10u\n", atomic_read(&dev->total_open));
+ DRM_SYSCTL_PRINT("close %10u\n", atomic_read(&dev->total_close));
+ DRM_SYSCTL_PRINT("ioctl %10u\n", atomic_read(&dev->total_ioctl));
+ DRM_SYSCTL_PRINT("irq %10u\n", atomic_read(&dev->total_irq));
+ DRM_SYSCTL_PRINT("ctx %10u\n", atomic_read(&dev->total_ctx));
+
+ DRM_SYSCTL_PRINT("\nlock statistics:\n");
+ DRM_SYSCTL_PRINT("locks %10u\n", atomic_read(&dev->total_locks));
+ DRM_SYSCTL_PRINT("unlocks %10u\n", atomic_read(&dev->total_unlocks));
+ DRM_SYSCTL_PRINT("contends %10u\n", atomic_read(&dev->total_contends));
+ DRM_SYSCTL_PRINT("sleeps %10u\n", atomic_read(&dev->total_sleeps));
+
+
+ if (dma) {
+ DRM_SYSCTL_PRINT("\ndma statistics:\n");
+ DRM_SYSCTL_PRINT("prio %10u\n",
+ atomic_read(&dma->total_prio));
+ DRM_SYSCTL_PRINT("bytes %10u\n",
+ atomic_read(&dma->total_bytes));
+ DRM_SYSCTL_PRINT("dmas %10u\n",
+ atomic_read(&dma->total_dmas));
+ DRM_SYSCTL_PRINT("missed:\n");
+ DRM_SYSCTL_PRINT(" dma %10u\n",
+ atomic_read(&dma->total_missed_dma));
+ DRM_SYSCTL_PRINT(" lock %10u\n",
+ atomic_read(&dma->total_missed_lock));
+ DRM_SYSCTL_PRINT(" free %10u\n",
+ atomic_read(&dma->total_missed_free));
+ DRM_SYSCTL_PRINT(" sched %10u\n",
+ atomic_read(&dma->total_missed_sched));
+ DRM_SYSCTL_PRINT("tried %10u\n",
+ atomic_read(&dma->total_tried));
+ DRM_SYSCTL_PRINT("hit %10u\n",
+ atomic_read(&dma->total_hit));
+ DRM_SYSCTL_PRINT("lost %10u\n",
+ atomic_read(&dma->total_lost));
+
+ buffer = dma->next_buffer;
+ if (buffer) {
+ DRM_SYSCTL_PRINT("next_buffer %7d\n", buffer->idx);
+ } else {
+ DRM_SYSCTL_PRINT("next_buffer none\n");
+ }
+ buffer = dma->this_buffer;
+ if (buffer) {
+ DRM_SYSCTL_PRINT("this_buffer %7d\n", buffer->idx);
+ } else {
+ DRM_SYSCTL_PRINT("this_buffer none\n");
+ }
+ }
+
+
+ DRM_SYSCTL_PRINT("\nvalues:\n");
+ if (dev->lock.hw_lock) {
+ DRM_SYSCTL_PRINT("lock 0x%08x\n",
+ dev->lock.hw_lock->lock);
+ } else {
+ DRM_SYSCTL_PRINT("lock none\n");
+ }
+ DRM_SYSCTL_PRINT("context_flag 0x%08x\n", dev->context_flag);
+ DRM_SYSCTL_PRINT("interrupt_flag 0x%08x\n", dev->interrupt_flag);
+ DRM_SYSCTL_PRINT("dma_flag 0x%08x\n", dev->dma_flag);
+
+ DRM_SYSCTL_PRINT("queue_count %10d\n", dev->queue_count);
+ DRM_SYSCTL_PRINT("last_context %10d\n", dev->last_context);
+ DRM_SYSCTL_PRINT("last_switch %10u\n", dev->last_switch);
+ DRM_SYSCTL_PRINT("last_checked %10d\n", dev->last_checked);
+
+
+ DRM_SYSCTL_PRINT("\n q2d d2c c2f"
+ " q2c q2f dma sch"
+ " ctx lacq lhld\n\n");
+ for (i = 0; i < DRM_DMA_HISTOGRAM_SLOTS; i++) {
+ DRM_SYSCTL_PRINT("%s %10lu %10u %10u %10u %10u %10u"
+ " %10u %10u %10u %10u %10u\n",
+ i == DRM_DMA_HISTOGRAM_SLOTS - 1 ? ">=" : "< ",
+ i == DRM_DMA_HISTOGRAM_SLOTS - 1
+ ? prev_value : slot_value ,
+
+ atomic_read(&dev->histo
+ .queued_to_dispatched[i]),
+ atomic_read(&dev->histo
+ .dispatched_to_completed[i]),
+ atomic_read(&dev->histo
+ .completed_to_freed[i]),
+
+ atomic_read(&dev->histo
+ .queued_to_completed[i]),
+ atomic_read(&dev->histo
+ .queued_to_freed[i]),
+ atomic_read(&dev->histo.dma[i]),
+ atomic_read(&dev->histo.schedule[i]),
+ atomic_read(&dev->histo.ctx[i]),
+ atomic_read(&dev->histo.lacq[i]),
+ atomic_read(&dev->histo.lhld[i]));
+ prev_value = slot_value;
+ slot_value = DRM_DMA_HISTOGRAM_NEXT(slot_value);
+ }
+ SYSCTL_OUT(req, "", 1);
+ return 0;
+}
+
+static int DRM(histo_info)DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ int ret;
+
+ DRM_OS_LOCK;
+	ret = DRM(_histo_info)(oidp, arg1, arg2, req);
+ DRM_OS_UNLOCK;
+ return ret;
+}
+#endif
diff --git a/bsd-core/drm_vm.c b/bsd-core/drm_vm.c
new file mode 100644
index 00000000..a06fb448
--- /dev/null
+++ b/bsd-core/drm_vm.c
@@ -0,0 +1,81 @@
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+static int DRM(dma_mmap)(dev_t kdev, vm_offset_t offset, int prot)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_device_dma_t *dma = dev->dma;
+ unsigned long physical;
+ unsigned long page;
+
+ if (!dma) return -1; /* Error */
+ if (!dma->pagelist) return -1; /* Nothing allocated */
+
+ page = offset >> PAGE_SHIFT;
+ physical = dma->pagelist[page];
+
+ DRM_DEBUG("0x%08x (page %lu) => 0x%08lx\n", offset, page, physical);
+ return atop(physical);
+}
+
+int DRM(mmap)(dev_t kdev, vm_offset_t offset, int prot)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_map_t *map = NULL;
+ drm_map_list_entry_t *listentry=NULL;
+ /*drm_file_t *priv;*/
+
+/* DRM_DEBUG("offset = 0x%x\n", offset);*/
+
+ /*XXX Fixme */
+ /*priv = DRM(find_file_by_proc)(dev, p);
+ if (!priv) {
+ DRM_DEBUG("can't find authenticator\n");
+ return EINVAL;
+ }
+
+ if (!priv->authenticated) DRM_OS_RETURN(EACCES);*/
+
+ if (dev->dma
+ && offset >= 0
+ && offset < ptoa(dev->dma->page_count))
+ return DRM(dma_mmap)(kdev, offset, prot);
+
+ /* A sequential search of a linked list is
+ fine here because: 1) there will only be
+ about 5-10 entries in the list and, 2) a
+ DRI client only has to do this mapping
+ once, so it doesn't have to be optimized
+ for performance, even if the list was a
+ bit longer. */
+ TAILQ_FOREACH(listentry, dev->maplist, link) {
+ map = listentry->map;
+/* DRM_DEBUG("considering 0x%x..0x%x\n", map->offset, map->offset + map->size - 1);*/
+ if (offset >= map->offset
+ && offset < map->offset + map->size) break;
+ }
+
+ if (!listentry) {
+ DRM_DEBUG("can't find map\n");
+ return -1;
+ }
+ if (((map->flags&_DRM_RESTRICTED) && suser(curproc))) {
+ DRM_DEBUG("restricted map\n");
+ return -1;
+ }
+
+ switch (map->type) {
+ case _DRM_FRAME_BUFFER:
+ case _DRM_REGISTERS:
+ case _DRM_AGP:
+ return atop(offset);
+ case _DRM_SHM:
+ return atop(vtophys(offset));
+ default:
+ return -1; /* This should never happen. */
+ }
+ DRM_DEBUG("bailing out\n");
+
+ return -1;
+}
+
diff --git a/bsd-core/mga/Makefile b/bsd-core/mga/Makefile
new file mode 100644
index 00000000..bbaeaa56
--- /dev/null
+++ b/bsd-core/mga/Makefile
@@ -0,0 +1,25 @@
+# $FreeBSD$
+
+KMOD= mga
+NOMAN= YES
+SRCS= mga_drv.c mga_state.c mga_warp.c mga_dma.c
+SRCS+= device_if.h bus_if.h pci_if.h opt_drm_linux.h
+CFLAGS+= ${DEBUG_FLAGS} -I. -I..
+
+@:
+ ln -sf /sys @
+
+machine:
+ ln -sf /sys/i386/include machine
+
+.if ${MACHINE_ARCH} == "i386"
+# The following line enables Linux ioctl handling.
+# Uncomment it if you want that support.
+#MGA_OPTS= "\#define DRM_LINUX" 1
+.endif
+
+opt_drm_linux.h:
+ touch opt_drm_linux.h
+ echo $(MGA_OPTS) >> opt_drm_linux.h
+
+.include <bsd.kmod.mk>
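The same MACHINE_ARCH block recurs in the r128, radeon, and tdfx Makefiles below. With the MGA_OPTS line uncommented, the opt_drm_linux.h target appends a single line, so the generated header is effectively:

    /* opt_drm_linux.h as produced by "echo $(MGA_OPTS) >> opt_drm_linux.h" */
    #define DRM_LINUX 1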
diff --git a/bsd-core/r128/Makefile b/bsd-core/r128/Makefile
new file mode 100644
index 00000000..ae5622ed
--- /dev/null
+++ b/bsd-core/r128/Makefile
@@ -0,0 +1,25 @@
+# $FreeBSD$
+
+KMOD = r128
+NOMAN= YES
+SRCS = r128_cce.c r128_drv.c r128_state.c
+SRCS += device_if.h bus_if.h pci_if.h opt_drm_linux.h
+CFLAGS += ${DEBUG_FLAGS} -I. -I..
+
+@:
+ ln -sf /sys @
+
+machine:
+ ln -sf /sys/i386/include machine
+
+.if ${MACHINE_ARCH} == "i386"
+# The following line enables Linux ioctl handling.
+# Uncomment it if you want that support.
+#R128_OPTS= "\#define DRM_LINUX" 1
+.endif
+
+opt_drm_linux.h:
+ touch opt_drm_linux.h
+ echo $(R128_OPTS) >> opt_drm_linux.h
+
+.include <bsd.kmod.mk>
diff --git a/bsd-core/radeon/Makefile b/bsd-core/radeon/Makefile
new file mode 100644
index 00000000..b1d77bf4
--- /dev/null
+++ b/bsd-core/radeon/Makefile
@@ -0,0 +1,25 @@
+# $FreeBSD$
+
+KMOD = radeon
+NOMAN= YES
+SRCS = radeon_cp.c radeon_drv.c radeon_state.c
+SRCS += device_if.h bus_if.h pci_if.h opt_drm_linux.h
+CFLAGS += ${DEBUG_FLAGS} -I. -I..
+
+@:
+ ln -sf /sys @
+
+machine:
+ ln -sf /sys/i386/include machine
+
+.if ${MACHINE_ARCH} == "i386"
+# The following line enables Linux ioctl handling.
+# Uncomment it if you want that support.
+#RADEON_OPTS= "\#define DRM_LINUX" 1
+.endif
+
+opt_drm_linux.h:
+ touch opt_drm_linux.h
+ echo $(RADEON_OPTS) >> opt_drm_linux.h
+
+.include <bsd.kmod.mk>
diff --git a/bsd-core/tdfx/Makefile b/bsd-core/tdfx/Makefile
index 4362a5ba..d177ff60 100644
--- a/bsd-core/tdfx/Makefile
+++ b/bsd-core/tdfx/Makefile
@@ -1,10 +1,10 @@
# $FreeBSD$
-KMOD = tdfx
-SRCS = tdfx_drv.c tdfx_context.c
-SRCS += device_if.h bus_if.h pci_if.h
-CFLAGS += ${DEBUG_FLAGS} -I. -I..
-KMODDEPS = drm
+KMOD= tdfx
+NOMAN= YES
+SRCS= tdfx_drv.c
+SRCS+= device_if.h bus_if.h pci_if.h opt_drm_linux.h
+CFLAGS+= ${DEBUG_FLAGS} -I. -I..
@:
ln -sf /sys @
@@ -12,4 +12,14 @@ KMODDEPS = drm
machine:
ln -sf /sys/i386/include machine
+.if ${MACHINE_ARCH} == "i386"
+# The following line enables Linux ioctl handling.
+# Uncomment it if you want that support.
+#TDFX_OPTS= "\#define DRM_LINUX" 1
+.endif
+
+opt_drm_linux.h:
+ touch opt_drm_linux.h
+ echo $(TDFX_OPTS) >> opt_drm_linux.h
+
.include <bsd.kmod.mk>
diff --git a/bsd/Imakefile b/bsd/Imakefile
index 0e11ec50..d18f1873 100644
--- a/bsd/Imakefile
+++ b/bsd/Imakefile
@@ -1,28 +1,4 @@
-XCOMM $XFree86: xc/programs/Xserver/hw/xfree86/os-support/bsd/drm/kernel/Imakefile,v 1.6 2001/04/18 14:52:43 dawes Exp $
-
-#include <Server.tmpl>
-
-#if 0
-LinkSourceFile(xf86drm.c,..)
-LinkSourceFile(xf86drmHash.c,..)
-LinkSourceFile(xf86drmRandom.c,..)
-LinkSourceFile(xf86drmSL.c,..)
-LinkSourceFile(xf86drm.h,$(XF86OSSRC))
-LinkSourceFile(xf86_OSproc.h,$(XF86OSSRC))
-LinkSourceFile(sigio.c,$(XF86OSSRC)/shared)
-#endif
-
-XCOMM Try to use the Linux version of the DRM headers. This avoids skew
-XCOMM and missing headers. If there's a need to break them out, they
-XCOMM can be re-added later. If not, they can be moved to somewhere more
-XCOMM OS-independent and referenced from there.
-LinkSourceFile(drm.h,$(XF86OSSRC)/linux/drm/kernel)
-LinkSourceFile(i810_drm.h,$(XF86OSSRC)/linux/drm/kernel)
-LinkSourceFile(mga_drm.h,$(XF86OSSRC)/linux/drm/kernel)
-LinkSourceFile(r128_drm.h,$(XF86OSSRC)/linux/drm/kernel)
-LinkSourceFile(radeon_drm.h,$(XF86OSSRC)/linux/drm/kernel)
-LinkSourceFile(sis_drm.h,$(XF86OSSRC)/linux/drm/kernel)
-
+XCOMM $XFree86: xc/programs/Xserver/hw/xfree86/os-support/bsd/drm/kernel/Imakefile,v 1.8 2001/12/13 00:24:45 alanh Exp $
XCOMM This is a kludge until we determine how best to build the
XCOMM kernel-specific device driver. This allows us to continue
@@ -37,7 +13,7 @@ install::
$(MAKE) -f Makefile.bsd install
#else
all::
- @echo 'Use "make -f Makefile.bsd" to manually build drm.o'
+ @echo 'Use "make -f Makefile.bsd" to manually build the modules'
#endif
clean::
diff --git a/bsd/Makefile b/bsd/Makefile
index 61cba175..9c87d963 100644
--- a/bsd/Makefile
+++ b/bsd/Makefile
@@ -1,5 +1,6 @@
# $FreeBSD$
-SUBDIR = drm tdfx mga gamma
+# i810, i830 & sis are not complete
+SUBDIR = tdfx mga r128 radeon gamma # i810 sis i830
.include <bsd.subdir.mk>
diff --git a/bsd/Makefile.bsd b/bsd/Makefile.bsd
index 61cba175..9c87d963 100644
--- a/bsd/Makefile.bsd
+++ b/bsd/Makefile.bsd
@@ -1,5 +1,6 @@
# $FreeBSD$
-SUBDIR = drm tdfx mga gamma
+# i810, i830 & sis are not complete
+SUBDIR = tdfx mga r128 radeon gamma # i810 sis i830
.include <bsd.subdir.mk>
diff --git a/bsd/ati_pcigart.h b/bsd/ati_pcigart.h
new file mode 100644
index 00000000..8b486c10
--- /dev/null
+++ b/bsd/ati_pcigart.h
@@ -0,0 +1,197 @@
+/* ati_pcigart.h -- ATI PCI GART support -*- linux-c -*-
+ * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+#if PAGE_SIZE == 8192
+# define ATI_PCIGART_TABLE_ORDER 2
+# define ATI_PCIGART_TABLE_PAGES (1 << 2)
+#elif PAGE_SIZE == 4096
+# define ATI_PCIGART_TABLE_ORDER 3
+# define ATI_PCIGART_TABLE_PAGES (1 << 3)
+#else
+# error - PAGE_SIZE not 8K or 4K
+#endif
+
+# define ATI_MAX_PCIGART_PAGES 8192 /* 32 MB aperture, 4K pages */
+# define ATI_PCIGART_PAGE_SIZE 4096 /* PCI GART page size */
+
+static unsigned long DRM(ati_alloc_pcigart_table)( void )
+{
+ unsigned long address;
+ struct page *page;
+ int i;
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+ address = __get_free_pages( GFP_KERNEL, ATI_PCIGART_TABLE_ORDER );
+ if ( address == 0UL ) {
+ return 0;
+ }
+
+ page = virt_to_page( address );
+
+	for ( i = 0 ; i < ATI_PCIGART_TABLE_PAGES ; i++, page++ ) {
+ atomic_inc( &page->count );
+ SetPageReserved( page );
+ }
+
+ DRM_DEBUG( "%s: returning 0x%08lx\n", __FUNCTION__, address );
+ return address;
+}
+
+static void DRM(ati_free_pcigart_table)( unsigned long address )
+{
+ struct page *page;
+ int i;
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+ page = virt_to_page( address );
+
+	for ( i = 0 ; i < ATI_PCIGART_TABLE_PAGES ; i++, page++ ) {
+ atomic_dec( &page->count );
+ ClearPageReserved( page );
+ }
+
+ free_pages( address, ATI_PCIGART_TABLE_ORDER );
+}
+
+int DRM(ati_pcigart_init)( drm_device_t *dev,
+ unsigned long *addr,
+ dma_addr_t *bus_addr)
+{
+ drm_sg_mem_t *entry = dev->sg;
+ unsigned long address = 0;
+ unsigned long pages;
+ u32 *pci_gart, page_base, bus_address = 0;
+ int i, j, ret = 0;
+
+ if ( !entry ) {
+ DRM_ERROR( "no scatter/gather memory!\n" );
+ goto done;
+ }
+
+ address = DRM(ati_alloc_pcigart_table)();
+ if ( !address ) {
+ DRM_ERROR( "cannot allocate PCI GART page!\n" );
+ goto done;
+ }
+
+ if ( !dev->pdev ) {
+ DRM_ERROR( "PCI device unknown!\n" );
+ goto done;
+ }
+
+ bus_address = pci_map_single(dev->pdev, (void *)address,
+ ATI_PCIGART_TABLE_PAGES * PAGE_SIZE,
+ PCI_DMA_TODEVICE);
+ if (bus_address == 0) {
+ DRM_ERROR( "unable to map PCIGART pages!\n" );
+ DRM(ati_free_pcigart_table)( address );
+ address = 0;
+ goto done;
+ }
+
+ pci_gart = (u32 *)address;
+
+ pages = ( entry->pages <= ATI_MAX_PCIGART_PAGES )
+ ? entry->pages : ATI_MAX_PCIGART_PAGES;
+
+ memset( pci_gart, 0, ATI_MAX_PCIGART_PAGES * sizeof(u32) );
+
+ for ( i = 0 ; i < pages ; i++ ) {
+ /* we need to support large memory configurations */
+ entry->busaddr[i] = pci_map_single(dev->pdev,
+ page_address( entry->pagelist[i] ),
+ PAGE_SIZE,
+ PCI_DMA_TODEVICE);
+ if (entry->busaddr[i] == 0) {
+ DRM_ERROR( "unable to map PCIGART pages!\n" );
+ DRM(ati_pcigart_cleanup)( dev, address, bus_address );
+ address = 0;
+ bus_address = 0;
+ goto done;
+ }
+ page_base = (u32) entry->busaddr[i];
+
+ for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
+ *pci_gart++ = cpu_to_le32( page_base );
+ page_base += ATI_PCIGART_PAGE_SIZE;
+ }
+ }
+
+ ret = 1;
+
+#if defined(__i386__) || defined(__x86_64__)
+ asm volatile ( "wbinvd" ::: "memory" );
+#else
+ mb();
+#endif
+
+done:
+ *addr = address;
+ *bus_addr = bus_address;
+ return ret;
+}
+
+int DRM(ati_pcigart_cleanup)( drm_device_t *dev,
+ unsigned long addr,
+ dma_addr_t bus_addr)
+{
+ drm_sg_mem_t *entry = dev->sg;
+ unsigned long pages;
+ int i;
+
+ /* we need to support large memory configurations */
+ if ( !entry ) {
+ DRM_ERROR( "no scatter/gather memory!\n" );
+ return 0;
+ }
+
+ if ( bus_addr ) {
+ pci_unmap_single(dev->pdev, bus_addr,
+ ATI_PCIGART_TABLE_PAGES * PAGE_SIZE,
+ PCI_DMA_TODEVICE);
+
+ pages = ( entry->pages <= ATI_MAX_PCIGART_PAGES )
+ ? entry->pages : ATI_MAX_PCIGART_PAGES;
+
+ for ( i = 0 ; i < pages ; i++ ) {
+ if ( !entry->busaddr[i] ) break;
+ pci_unmap_single(dev->pdev, entry->busaddr[i],
+ PAGE_SIZE, PCI_DMA_TODEVICE);
+ }
+ }
+
+ if ( addr ) {
+ DRM(ati_free_pcigart_table)( addr );
+ }
+
+ return 1;
+}
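As a sanity check on the constants at the top of this file: the table spans ATI_PCIGART_TABLE_PAGES system pages (32 KB in both the 4 KB and 8 KB cases), which holds ATI_MAX_PCIGART_PAGES four-byte entries, and at ATI_PCIGART_PAGE_SIZE per entry that covers the 32 MB aperture the comment mentions. A throwaway check of that arithmetic (not part of the driver):

    /* Standalone check of the PCI GART sizing above, for the 4 KB page case. */
    #include <assert.h>

    #define EX_PAGE_SIZE        4096
    #define EX_TABLE_PAGES      (1 << 3)    /* ATI_PCIGART_TABLE_ORDER == 3 */
    #define EX_MAX_GART_PAGES   8192
    #define EX_GART_PAGE_SIZE   4096

    int main(void)
    {
        /* 8 x 4 KB table pages = 32 KB = 8192 four-byte GART entries. */
        assert(EX_TABLE_PAGES * EX_PAGE_SIZE == EX_MAX_GART_PAGES * 4);
        /* 8192 entries x 4 KB each = 32 MB of PCI GART aperture. */
        assert(EX_MAX_GART_PAGES * EX_GART_PAGE_SIZE == 32 * 1024 * 1024);
        return 0;
    }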
diff --git a/bsd/drm.h b/bsd/drm.h
index ddad1be7..68876940 100644
--- a/bsd/drm.h
+++ b/bsd/drm.h
@@ -1,4 +1,4 @@
-/* drm.h -- Header for Direct Rendering Manager -*- c -*-
+/* drm.h -- Header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
@@ -11,18 +11,18 @@
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
@@ -35,18 +35,34 @@
#ifndef _DRM_H_
#define _DRM_H_
-#include <sys/ioccom.h> /* For _IO* macros */
+#include <sys/ioccom.h>
+#define DRM_IOCTL_NR(n) ((n) & 0xff)
+
+#define XFREE86_VERSION(major,minor,patch,snap) \
+ ((major << 16) | (minor << 8) | patch)
+#ifndef CONFIG_XFREE86_VERSION
+#define CONFIG_XFREE86_VERSION XFREE86_VERSION(4,1,0,0)
+#endif
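For reference, the packed value is (major << 16) | (minor << 8) | patch, so the default above works out as follows (an aside, not part of the header):

    /* Worked example of the version packing:
     *   XFREE86_VERSION(4,1,0,0) = (4 << 16) | (1 << 8) | 0 = 0x040100
     * which is why the conditional blocks below split at the 4.1.0 boundary. */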
+
+#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
+#define DRM_PROC_DEVICES "/proc/devices"
+#define DRM_PROC_MISC "/proc/misc"
+#define DRM_PROC_DRM "/proc/drm"
#define DRM_DEV_DRM "/dev/drm"
#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
#define DRM_DEV_UID 0
#define DRM_DEV_GID 0
+#endif
-
-#define DRM_NAME "drm" /* Name in kernel, /dev */
+#if CONFIG_XFREE86_VERSION >= XFREE86_VERSION(4,1,0,0)
+#define DRM_MAJOR 226
+#define DRM_MAX_MINOR 15
+#endif
+#define DRM_NAME "drm" /* Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER 5 /* At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER 22 /* Up to 2^22 bytes = 4MB */
-#define DRM_RAM_PERCENT 10 /* How much system ram can we lock? */
+#define DRM_RAM_PERCENT 50 /* How much system ram can we lock? */
#define _DRM_LOCK_HELD 0x80000000 /* Hardware lock is held */
#define _DRM_LOCK_CONT 0x40000000 /* Hardware lock is contended */
@@ -59,20 +75,31 @@ typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;
-/* Warning: If you change this structure, make sure you change
+/* Warning: If you change this structure, make sure you change
* XF86DRIClipRectRec in the server as well */
typedef struct drm_clip_rect {
- unsigned short x1;
- unsigned short y1;
- unsigned short x2;
- unsigned short y2;
+ unsigned short x1;
+ unsigned short y1;
+ unsigned short x2;
+ unsigned short y2;
} drm_clip_rect_t;
-/* Seperate include files for the i810/mga/r128 specific structures */
+typedef struct drm_tex_region {
+ unsigned char next;
+ unsigned char prev;
+ unsigned char in_use;
+ unsigned char padding;
+ unsigned int age;
+} drm_tex_region_t;
+
+/* Separate include files for the driver-specific structures */
#include "mga_drm.h"
#include "i810_drm.h"
+#include "i830_drm.h"
#include "r128_drm.h"
+#include "radeon_drm.h"
+#include "sis_drm.h"
typedef struct drm_version {
int version_major; /* Major version */
@@ -111,10 +138,11 @@ typedef struct drm_control {
} drm_control_t;
typedef enum drm_map_type {
- _DRM_FRAME_BUFFER = 0, /* WC (no caching), no core dump */
- _DRM_REGISTERS = 1, /* no caching, no core dump */
- _DRM_SHM = 2, /* shared, cached */
- _DRM_AGP = 3 /* AGP/GART */
+ _DRM_FRAME_BUFFER = 0, /* WC (no caching), no core dump */
+ _DRM_REGISTERS = 1, /* no caching, no core dump */
+ _DRM_SHM = 2, /* shared, cached */
+ _DRM_AGP = 3, /* AGP/GART */
+ _DRM_SCATTER_GATHER = 4 /* Scatter/gather memory for PCI DMA */
} drm_map_type_t;
typedef enum drm_map_flags {
@@ -123,9 +151,15 @@ typedef enum drm_map_flags {
_DRM_LOCKED = 0x04, /* shared, cached, locked */
_DRM_KERNEL = 0x08, /* kernel requires access */
_DRM_WRITE_COMBINING = 0x10, /* use write-combining if available */
- _DRM_CONTAINS_LOCK = 0x20 /* SHM page that contains lock */
+ _DRM_CONTAINS_LOCK = 0x20, /* SHM page that contains lock */
+ _DRM_REMOVABLE = 0x40 /* Removable mapping */
} drm_map_flags_t;
+typedef struct drm_ctx_priv_map {
+ unsigned int ctx_id; /* Context requesting private mapping */
+ void *handle; /* Handle of map */
+} drm_ctx_priv_map_t;
+
typedef struct drm_map {
unsigned long offset; /* Requested physical address (0 for SAREA)*/
unsigned long size; /* Requested physical size (bytes) */
@@ -137,6 +171,44 @@ typedef struct drm_map {
/* Private data */
} drm_map_t;
+typedef struct drm_client {
+ int idx; /* Which client desired? */
+ int auth; /* Is client authenticated? */
+ unsigned long pid; /* Process id */
+ unsigned long uid; /* User id */
+ unsigned long magic; /* Magic */
+ unsigned long iocs; /* Ioctl count */
+} drm_client_t;
+
+typedef enum {
+ _DRM_STAT_LOCK,
+ _DRM_STAT_OPENS,
+ _DRM_STAT_CLOSES,
+ _DRM_STAT_IOCTLS,
+ _DRM_STAT_LOCKS,
+ _DRM_STAT_UNLOCKS,
+ _DRM_STAT_VALUE, /* Generic value */
+ _DRM_STAT_BYTE, /* Generic byte counter (1024bytes/K) */
+ _DRM_STAT_COUNT, /* Generic non-byte counter (1000/k) */
+
+ _DRM_STAT_IRQ, /* IRQ */
+ _DRM_STAT_PRIMARY, /* Primary DMA bytes */
+ _DRM_STAT_SECONDARY, /* Secondary DMA bytes */
+ _DRM_STAT_DMA, /* DMA */
+ _DRM_STAT_SPECIAL, /* Special DMA (e.g., priority or polled) */
+ _DRM_STAT_MISSED /* Missed DMA opportunity */
+
+ /* Add to the *END* of the list */
+} drm_stat_type_t;
+
+typedef struct drm_stats {
+ unsigned long count;
+ struct {
+ unsigned long value;
+ drm_stat_type_t type;
+ } data[15];
+} drm_stats_t;
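Because each slot carries its own drm_stat_type_t tag, a consumer does not have to rely on ordering within data[]. A hedged userland sketch of pulling the block through the DRM_IOCTL_GET_STATS request defined further down:

    /* Illustrative only: dump whatever counters the driver exposes. */
    #include <sys/ioctl.h>
    #include <stdio.h>
    #include "drm.h"

    static void example_dump_stats(int fd)
    {
        drm_stats_t stats;
        unsigned long i;

        if (ioctl(fd, DRM_IOCTL_GET_STATS, &stats) == -1) {
            perror("DRM_IOCTL_GET_STATS");
            return;
        }
        for (i = 0; i < stats.count; i++)
            printf("stat %lu: type=%d value=%lu\n",
                   i, (int)stats.data[i].type, stats.data[i].value);
    }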
+
typedef enum drm_lock_flags {
_DRM_LOCK_READY = 0x01, /* Wait until hardware is ready for DMA */
_DRM_LOCK_QUIESCENT = 0x02, /* Wait until hardware quiescent */
@@ -179,7 +251,8 @@ typedef struct drm_buf_desc {
int high_mark; /* High water mark */
enum {
_DRM_PAGE_ALIGN = 0x01, /* Align on page boundaries for DMA */
- _DRM_AGP_BUFFER = 0x02 /* Buffer is in agp space */
+ _DRM_AGP_BUFFER = 0x02, /* Buffer is in agp space */
+ _DRM_SG_BUFFER = 0x04 /* Scatter/gather memory buffer */
} flags;
unsigned long agp_start; /* Start address of where the agp buffers
* are in the agp aperture */
@@ -285,78 +358,144 @@ typedef struct drm_agp_info {
unsigned short id_device;
} drm_agp_info_t;
-#define DRM_IOCTL_BASE 'd'
-#define DRM_IOCTL_NR(n) ((n) & 0xff)
-#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
-#define DRM_IOR(nr,size) _IOR(DRM_IOCTL_BASE,nr,size)
-#define DRM_IOW(nr,size) _IOW(DRM_IOCTL_BASE,nr,size)
-#define DRM_IOWR(nr,size) _IOWR(DRM_IOCTL_BASE,nr,size)
-
-
-#define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t)
-#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t)
-#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, drm_auth_t)
-#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t)
-
-#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t)
-#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, drm_auth_t)
-#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, drm_block_t)
-#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, drm_block_t)
-#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, drm_control_t)
-#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, drm_map_t)
-#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, drm_buf_desc_t)
-#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, drm_buf_desc_t)
-#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm_buf_info_t)
-#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm_buf_map_t)
-#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm_buf_free_t)
-
-#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, drm_ctx_t)
-#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, drm_ctx_t)
-#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, drm_ctx_t)
-#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, drm_ctx_t)
-#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, drm_ctx_t)
-#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, drm_ctx_t)
-#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, drm_ctx_res_t)
-#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, drm_draw_t)
-#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, drm_draw_t)
-#define DRM_IOCTL_DMA DRM_IOWR(0x29, drm_dma_t)
-#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, drm_lock_t)
-#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, drm_lock_t)
-#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, drm_lock_t)
-
-#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
-#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
-#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, drm_agp_mode_t)
-#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, drm_agp_info_t)
-#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, drm_agp_buffer_t)
-#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, drm_agp_buffer_t)
-#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, drm_agp_binding_t)
-#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, drm_agp_binding_t)
-
-/* Mga specific ioctls */
-#define DRM_IOCTL_MGA_INIT DRM_IOW( 0x40, drm_mga_init_t)
-#define DRM_IOCTL_MGA_SWAP DRM_IOW( 0x41, drm_mga_swap_t)
-#define DRM_IOCTL_MGA_CLEAR DRM_IOW( 0x42, drm_mga_clear_t)
-#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x43, drm_mga_iload_t)
-#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x44, drm_mga_vertex_t)
-#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x45, drm_lock_t )
-#define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t)
-
-/* I810 specific ioctls */
-#define DRM_IOCTL_I810_INIT DRM_IOW( 0x40, drm_i810_init_t)
-#define DRM_IOCTL_I810_VERTEX DRM_IOW( 0x41, drm_i810_vertex_t)
-#define DRM_IOCTL_I810_CLEAR DRM_IOW( 0x42, drm_i810_clear_t)
-#define DRM_IOCTL_I810_FLUSH DRM_IO ( 0x43)
-#define DRM_IOCTL_I810_GETAGE DRM_IO ( 0x44)
-#define DRM_IOCTL_I810_GETBUF DRM_IOWR(0x45, drm_i810_dma_t)
-#define DRM_IOCTL_I810_SWAP DRM_IO ( 0x46)
+typedef struct drm_scatter_gather {
+ unsigned long size; /* In bytes -- will round to page boundary */
+ unsigned long handle; /* Used for mapping / unmapping */
+} drm_scatter_gather_t;
+
+#define DRM_IOCTL_BASE 'd'
+#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
+#define DRM_IOR(nr,size) _IOR(DRM_IOCTL_BASE,nr,size)
+#define DRM_IOW(nr,size) _IOW(DRM_IOCTL_BASE,nr,size)
+#define DRM_IOWR(nr,size) _IOWR(DRM_IOCTL_BASE,nr,size)
+
+
+#define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t)
+#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t)
+#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, drm_auth_t)
+#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t)
+#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, drm_map_t)
+#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, drm_client_t)
+#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, drm_stats_t)
+
+#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t)
+#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, drm_auth_t)
+#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, drm_block_t)
+#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, drm_block_t)
+#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, drm_control_t)
+#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, drm_map_t)
+#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, drm_buf_desc_t)
+#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, drm_buf_desc_t)
+#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm_buf_info_t)
+#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm_buf_map_t)
+#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm_buf_free_t)
+
+#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, drm_map_t)
+
+#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, drm_ctx_priv_map_t)
+#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, drm_ctx_priv_map_t)
+
+#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, drm_ctx_t)
+#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, drm_ctx_t)
+#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, drm_ctx_t)
+#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, drm_ctx_t)
+#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, drm_ctx_t)
+#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, drm_ctx_t)
+#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, drm_ctx_res_t)
+#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, drm_draw_t)
+#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, drm_draw_t)
+#define DRM_IOCTL_DMA DRM_IOWR(0x29, drm_dma_t)
+#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, drm_lock_t)
+#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, drm_lock_t)
+#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, drm_lock_t)
+
+#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
+#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
+#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, drm_agp_mode_t)
+#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, drm_agp_info_t)
+#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, drm_agp_buffer_t)
+#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, drm_agp_buffer_t)
+#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, drm_agp_binding_t)
+#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, drm_agp_binding_t)
+
+#define DRM_IOCTL_SG_ALLOC DRM_IOW( 0x38, drm_scatter_gather_t)
+#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, drm_scatter_gather_t)
+
+/* MGA specific ioctls */
+#define DRM_IOCTL_MGA_INIT DRM_IOW( 0x40, drm_mga_init_t)
+#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x41, drm_lock_t)
+#define DRM_IOCTL_MGA_RESET DRM_IO( 0x42)
+#define DRM_IOCTL_MGA_SWAP DRM_IO( 0x43)
+#define DRM_IOCTL_MGA_CLEAR DRM_IOW( 0x44, drm_mga_clear_t)
+#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x45, drm_mga_vertex_t)
+#define DRM_IOCTL_MGA_INDICES DRM_IOW( 0x46, drm_mga_indices_t)
+#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x47, drm_mga_iload_t)
+#define DRM_IOCTL_MGA_BLIT DRM_IOW( 0x48, drm_mga_blit_t)
+
+/* i810 specific ioctls */
+#define DRM_IOCTL_I810_INIT DRM_IOW( 0x40, drm_i810_init_t)
+#define DRM_IOCTL_I810_VERTEX DRM_IOW( 0x41, drm_i810_vertex_t)
+#define DRM_IOCTL_I810_CLEAR DRM_IOW( 0x42, drm_i810_clear_t)
+#define DRM_IOCTL_I810_FLUSH DRM_IO( 0x43)
+#define DRM_IOCTL_I810_GETAGE DRM_IO( 0x44)
+#define DRM_IOCTL_I810_GETBUF DRM_IOWR(0x45, drm_i810_dma_t)
+#define DRM_IOCTL_I810_SWAP DRM_IO( 0x46)
+#define DRM_IOCTL_I810_COPY DRM_IOW( 0x47, drm_i810_copy_t)
+#define DRM_IOCTL_I810_DOCOPY DRM_IO( 0x48)
/* Rage 128 specific ioctls */
-#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
-#define DRM_IOCTL_R128_RESET DRM_IO( 0x41)
-#define DRM_IOCTL_R128_FLUSH DRM_IO( 0x42)
-#define DRM_IOCTL_R128_CCEIDL DRM_IO( 0x43)
-#define DRM_IOCTL_R128_PACKET DRM_IOW( 0x44, drm_r128_packet_t)
-#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x45, drm_r128_vertex_t)
+#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
+#define DRM_IOCTL_R128_CCE_START DRM_IO( 0x41)
+#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( 0x42, drm_r128_cce_stop_t)
+#define DRM_IOCTL_R128_CCE_RESET DRM_IO( 0x43)
+#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( 0x44)
+#define DRM_IOCTL_R128_RESET DRM_IO( 0x46)
+#define DRM_IOCTL_R128_SWAP DRM_IO( 0x47)
+#define DRM_IOCTL_R128_CLEAR DRM_IOW( 0x48, drm_r128_clear_t)
+#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x49, drm_r128_vertex_t)
+#define DRM_IOCTL_R128_INDICES DRM_IOW( 0x4a, drm_r128_indices_t)
+#define DRM_IOCTL_R128_BLIT DRM_IOW( 0x4b, drm_r128_blit_t)
+#define DRM_IOCTL_R128_DEPTH DRM_IOW( 0x4c, drm_r128_depth_t)
+#define DRM_IOCTL_R128_STIPPLE DRM_IOW( 0x4d, drm_r128_stipple_t)
+#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(0x4f, drm_r128_indirect_t)
+#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( 0x50, drm_r128_fullscreen_t)
+
+/* Radeon specific ioctls */
+#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( 0x40, drm_radeon_init_t)
+#define DRM_IOCTL_RADEON_CP_START DRM_IO( 0x41)
+#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( 0x42, drm_radeon_cp_stop_t)
+#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( 0x43)
+#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( 0x44)
+#define DRM_IOCTL_RADEON_RESET DRM_IO( 0x45)
+#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( 0x46, drm_radeon_fullscreen_t)
+#define DRM_IOCTL_RADEON_SWAP DRM_IO( 0x47)
+#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( 0x48, drm_radeon_clear_t)
+#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( 0x49, drm_radeon_vertex_t)
+#define DRM_IOCTL_RADEON_INDICES DRM_IOW( 0x4a, drm_radeon_indices_t)
+#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( 0x4c, drm_radeon_stipple_t)
+#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t)
+#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t)
+
+/* SiS specific ioctls */
+
+#define SIS_IOCTL_FB_ALLOC DRM_IOWR(0x44, drm_sis_mem_t)
+#define SIS_IOCTL_FB_FREE DRM_IOW( 0x45, drm_sis_mem_t)
+#define SIS_IOCTL_AGP_INIT DRM_IOWR(0x53, drm_sis_agp_t)
+#define SIS_IOCTL_AGP_ALLOC DRM_IOWR(0x54, drm_sis_mem_t)
+#define SIS_IOCTL_AGP_FREE DRM_IOW( 0x55, drm_sis_mem_t)
+#define SIS_IOCTL_FLIP DRM_IOW( 0x48, drm_sis_flip_t)
+#define SIS_IOCTL_FLIP_INIT DRM_IO( 0x49)
+#define SIS_IOCTL_FLIP_FINAL DRM_IO( 0x50)
+
+/* I830 specific ioctls */
+#define DRM_IOCTL_I830_INIT DRM_IOW( 0x40, drm_i830_init_t)
+#define DRM_IOCTL_I830_VERTEX DRM_IOW( 0x41, drm_i830_vertex_t)
+#define DRM_IOCTL_I830_CLEAR DRM_IOW( 0x42, drm_i830_clear_t)
+#define DRM_IOCTL_I830_FLUSH DRM_IO ( 0x43)
+#define DRM_IOCTL_I830_GETAGE DRM_IO ( 0x44)
+#define DRM_IOCTL_I830_GETBUF DRM_IOWR(0x45, drm_i830_dma_t)
+#define DRM_IOCTL_I830_SWAP DRM_IO ( 0x46)
+#define DRM_IOCTL_I830_COPY DRM_IOW( 0x47, drm_i830_copy_t)
+#define DRM_IOCTL_I830_DOCOPY DRM_IO ( 0x48)
#endif
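All of the request numbers above are encoded with the standard _IO* macros against the 'd' base, so the direction of the copy (none, read, write, or both) and the size of the payload structure travel inside the command word itself. Driver-private requests simply reuse the numbers from 0x40 upward, which is why the MGA, i810, Rage 128, Radeon, SiS and i830 blocks can overlap: a given device node is only ever bound to one of those drivers. A hypothetical driver (the "foo" names below are illustrative, not part of this tree) would declare its private ioctls the same way:

/* Hypothetical example only -- there is no "foo" driver in this tree. */
typedef struct drm_foo_init {
	unsigned long sarea_priv_offset;
} drm_foo_init_t;

typedef struct drm_foo_getparam {
	int param;	/* in  */
	int value;	/* out */
} drm_foo_getparam_t;

#define DRM_IOCTL_FOO_INIT     DRM_IOW( 0x40, drm_foo_init_t)     /* user -> kernel only */
#define DRM_IOCTL_FOO_RESET    DRM_IO(  0x41)                     /* no payload          */
#define DRM_IOCTL_FOO_GETPARAM DRM_IOWR(0x42, drm_foo_getparam_t) /* copied both ways    */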
diff --git a/bsd/drmP.h b/bsd/drmP.h
index 7a1159c7..ead40f86 100644
--- a/bsd/drmP.h
+++ b/bsd/drmP.h
@@ -1,8 +1,8 @@
-/* drmP.h -- Private header for Direct Rendering Manager -*- c -*-
+/* drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
- * Revised: Tue Oct 12 08:51:07 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -11,225 +11,119 @@
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/drmP.h,v 1.58 1999/08/30 13:05:00 faith Exp $
- * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/bsd/drm/kernel/drmP.h,v 1.3 2001/03/06 16:45:26 dawes Exp $
- *
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
*/
#ifndef _DRM_P_H_
#define _DRM_P_H_
-#ifdef _KERNEL
-#include <sys/param.h>
-#include <sys/queue.h>
-#include <sys/malloc.h>
-#include <sys/kernel.h>
-#include <sys/module.h>
-#include <sys/systm.h>
-#include <sys/conf.h>
-#include <sys/stat.h>
-#include <sys/proc.h>
-#include <sys/lock.h>
-#include <sys/fcntl.h>
-#include <sys/uio.h>
-#include <sys/filio.h>
-#include <sys/sysctl.h>
-#include <sys/select.h>
-#include <sys/bus.h>
-#if __FreeBSD_version >= 400005
-#include <sys/taskqueue.h>
-#endif
+#if defined(_KERNEL) || defined(__KERNEL__)
-#if __FreeBSD_version >= 400006
-#define DRM_AGP
+/* DRM template customization defaults
+ */
+#ifndef __HAVE_AGP
+#define __HAVE_AGP 0
#endif
-
-#ifdef DRM_AGP
-#include <pci/agpvar.h>
+#ifndef __HAVE_MTRR
+#define __HAVE_MTRR 0
+#endif
+#ifndef __HAVE_CTX_BITMAP
+#define __HAVE_CTX_BITMAP 0
+#endif
+#ifndef __HAVE_DMA
+#define __HAVE_DMA 0
+#endif
+#ifndef __HAVE_DMA_IRQ
+#define __HAVE_DMA_IRQ 0
+#endif
+#ifndef __HAVE_DMA_WAITLIST
+#define __HAVE_DMA_WAITLIST 0
+#endif
+#ifndef __HAVE_DMA_FREELIST
+#define __HAVE_DMA_FREELIST 0
+#endif
+#ifndef __HAVE_DMA_HISTOGRAM
+#define __HAVE_DMA_HISTOGRAM 0
#endif
-#include "drm.h"
+#define DRM_DEBUG_CODE 0 /* Include debugging code (if > 1, then
+ also include looping detection.) */
-typedef u_int32_t atomic_t;
-typedef u_int32_t cycles_t;
-typedef u_int32_t spinlock_t;
-#define atomic_set(p, v) (*(p) = (v))
-#define atomic_read(p) (*(p))
-#define atomic_inc(p) atomic_add_int(p, 1)
-#define atomic_dec(p) atomic_subtract_int(p, 1)
-#define atomic_add(n, p) atomic_add_int(p, n)
-#define atomic_sub(n, p) atomic_subtract_int(p, n)
-
-/* The version number here is a guess */
-#if __FreeBSD_version >= 500010
-#define callout_init(a) callout_init(a, 0)
-#endif
+typedef struct drm_device drm_device_t;
+typedef struct drm_file drm_file_t;
-/* Fake this */
-static __inline u_int32_t
-test_and_set_bit(int b, volatile u_int32_t *p)
-{
- int s = splhigh();
- u_int32_t m = 1<<b;
- u_int32_t r = *p & m;
- *p |= m;
- splx(s);
- return r;
-}
-
-static __inline void
-clear_bit(int b, volatile u_int32_t *p)
-{
- atomic_clear_int(p + (b >> 5), 1 << (b & 0x1f));
-}
-
-static __inline void
-set_bit(int b, volatile u_int32_t *p)
-{
- atomic_set_int(p + (b >> 5), 1 << (b & 0x1f));
-}
-
-static __inline int
-test_bit(int b, volatile u_int32_t *p)
-{
- return p[b >> 5] & (1 << (b & 0x1f));
-}
-
-static __inline int
-find_first_zero_bit(volatile u_int32_t *p, int max)
-{
- int b;
-
- for (b = 0; b < max; b += 32) {
- if (p[b >> 5] != ~0) {
- for (;;) {
- if ((p[b >> 5] & (1 << (b & 0x1f))) == 0)
- return b;
- b++;
- }
- }
- }
- return max;
-}
-
-#define spldrm() spltty()
-
-#define memset(p, v, s) bzero(p, s)
-
-/*
- * Fake out the module macros for versions of FreeBSD where they don't
- * exist.
- */
-#if __FreeBSD_version < 400002
+/* There's undoubtedly more of this file to go into these OS-dependent ones. */
-#define MODULE_VERSION(a,b) struct __hack
-#define MODULE_DEPEND(a,b,c,d,e) struct __hack
+#include "drm_os_freebsd.h"
-#endif
+#include "drm.h"
-#define DRM_DEBUG_CODE 0 /* Include debugging code (if > 1, then
- also include looping detection. */
-#define DRM_DMA_HISTOGRAM 1 /* Make histogram of DMA latency. */
+/* Begin the DRM... */
#define DRM_HASH_SIZE 16 /* Size of key hash table */
#define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */
#define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */
#define DRM_LOOPING_LIMIT 5000000
#define DRM_BSZ 1024 /* Buffer size for /dev/drm? output */
-#define DRM_TIME_SLICE (hz/20) /* Time slice for GLXContexts */
#define DRM_LOCK_SLICE 1 /* Time slice for lock, in jiffies */
#define DRM_FLAG_DEBUG 0x01
#define DRM_FLAG_NOCTX 0x02
-#define DRM_MEM_DMA 0
-#define DRM_MEM_SAREA 1
-#define DRM_MEM_DRIVER 2
-#define DRM_MEM_MAGIC 3
-#define DRM_MEM_IOCTLS 4
-#define DRM_MEM_MAPS 5
-#define DRM_MEM_VMAS 6
-#define DRM_MEM_BUFS 7
-#define DRM_MEM_SEGS 8
-#define DRM_MEM_PAGES 9
-#define DRM_MEM_FILES 10
-#define DRM_MEM_QUEUES 11
-#define DRM_MEM_CMDS 12
-#define DRM_MEM_MAPPINGS 13
-#define DRM_MEM_BUFLISTS 14
+#define DRM_MEM_DMA 0
+#define DRM_MEM_SAREA 1
+#define DRM_MEM_DRIVER 2
+#define DRM_MEM_MAGIC 3
+#define DRM_MEM_IOCTLS 4
+#define DRM_MEM_MAPS 5
+#define DRM_MEM_VMAS 6
+#define DRM_MEM_BUFS 7
+#define DRM_MEM_SEGS 8
+#define DRM_MEM_PAGES 9
+#define DRM_MEM_FILES 10
+#define DRM_MEM_QUEUES 11
+#define DRM_MEM_CMDS 12
+#define DRM_MEM_MAPPINGS 13
+#define DRM_MEM_BUFLISTS 14
#define DRM_MEM_AGPLISTS 15
#define DRM_MEM_TOTALAGP 16
#define DRM_MEM_BOUNDAGP 17
#define DRM_MEM_CTXBITMAP 18
+#define DRM_MEM_STUB 19
+#define DRM_MEM_SGLISTS 20
#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
/* Backward compatibility section */
+ /* _PAGE_WT changed to _PAGE_PWT in 2.2.6 */
#ifndef _PAGE_PWT
- /* The name of _PAGE_WT was changed to
- _PAGE_PWT in Linux 2.2.6 */
#define _PAGE_PWT _PAGE_WT
#endif
-#define __drm_dummy_lock(lock) (*(__volatile__ unsigned int *)lock)
-#define _DRM_CAS(lock,old,new,__ret) \
- do { \
- int __dummy; /* Can't mark eax as clobbered */ \
- __asm__ __volatile__( \
- "lock ; cmpxchg %4,%1\n\t" \
- "setnz %0" \
- : "=d" (__ret), \
- "=m" (__drm_dummy_lock(lock)), \
- "=a" (__dummy) \
- : "2" (old), \
- "r" (new)); \
- } while (0)
-
+ /* Mapping helper macros */
+#define DRM_IOREMAP(map) \
+ (map)->handle = DRM(ioremap)( (map)->offset, (map)->size )
-
- /* Macros to make printk easier */
-#define DRM_ERROR(fmt, arg...) \
- printf("error: " "[" DRM_NAME ":" __FUNCTION__ "] *ERROR* " fmt , ##arg)
-#define DRM_MEM_ERROR(area, fmt, arg...) \
- printf("error: " "[" DRM_NAME ":" __FUNCTION__ ":%s] *ERROR* " fmt , \
- drm_mem_stats[area].name , ##arg)
-#define DRM_INFO(fmt, arg...) printf("info: " "[" DRM_NAME "] " fmt , ##arg)
-
-#if DRM_DEBUG_CODE
-#define DRM_DEBUG(fmt, arg...) \
- do { \
- if (drm_flags&DRM_FLAG_DEBUG) \
- printf("[" DRM_NAME ":" __FUNCTION__ "] " fmt , \
- ##arg); \
+#define DRM_IOREMAPFREE(map) \
+ do { \
+ if ( (map)->handle && (map)->size ) \
+ DRM(ioremapfree)( (map)->handle, (map)->size ); \
} while (0)
-#else
-#define DRM_DEBUG(fmt, arg...) do { } while (0)
-#endif
-
-#define DRM_PROC_LIMIT (PAGE_SIZE-80)
-
-#define DRM_SYSCTL_PRINT(fmt, arg...) \
- snprintf(buf, sizeof(buf), fmt, ##arg); \
- error = SYSCTL_OUT(req, buf, strlen(buf)); \
- if (error) return error;
-
-#define DRM_SYSCTL_PRINT_RET(ret, fmt, arg...) \
- snprintf(buf, sizeof(buf), fmt, ##arg); \
- error = SYSCTL_OUT(req, buf, strlen(buf)); \
- if (error) { ret; return error; }
/* Internal types and structures */
#define DRM_ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
@@ -240,15 +134,25 @@ find_first_zero_bit(volatile u_int32_t *p, int max)
#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
+#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do { \
+ (_map) = (_dev)->context_sareas[_ctx]; \
+} while(0)
+
+
+typedef struct drm_pci_list {
+ u16 vendor;
+ u16 device;
+} drm_pci_list_t;
+
typedef struct drm_ioctl_desc {
- d_ioctl_t *func;
+ d_ioctl_t *func;
int auth_needed;
int root_only;
} drm_ioctl_desc_t;
typedef struct drm_devstate {
pid_t owner; /* X server pid holding x_lock */
-
+
} drm_devstate_t;
typedef struct drm_magic_entry {
@@ -258,8 +162,8 @@ typedef struct drm_magic_entry {
} drm_magic_entry_t;
typedef struct drm_magic_head {
- struct drm_magic_entry *head;
- struct drm_magic_entry *tail;
+ struct drm_magic_entry *head;
+ struct drm_magic_entry *tail;
} drm_magic_head_t;
typedef struct drm_vma_entry {
@@ -279,7 +183,7 @@ typedef struct drm_buf {
struct drm_buf *next; /* Kernel-only: used for free list */
__volatile__ int waiting; /* On kernel DMA queue */
__volatile__ int pending; /* On hardware DMA queue */
- int dma_wait; /* Processes waiting */
+ wait_queue_head_t dma_wait; /* Processes waiting */
pid_t pid; /* PID of holding process */
int context; /* Kernel queue for this buffer */
int while_locked;/* Dispatch this buffer while locked */
@@ -292,15 +196,15 @@ typedef struct drm_buf {
DRM_LIST_RECLAIM = 5
} list; /* Which list we're on */
- void *dev_private;
- int dev_priv_size;
-
#if DRM_DMA_HISTOGRAM
- struct timespec time_queued; /* Queued to kernel DMA queue */
- struct timespec time_dispatched; /* Dispatched to hardware */
- struct timespec time_completed; /* Completed by hardware */
- struct timespec time_freed; /* Back on freelist */
+ cycles_t time_queued; /* Queued to kernel DMA queue */
+ cycles_t time_dispatched; /* Dispatched to hardware */
+ cycles_t time_completed; /* Completed by hardware */
+ cycles_t time_freed; /* Back on freelist */
#endif
+
+ int dev_priv_size; /* Size of buffer private storage */
+ void *dev_private; /* Per-buffer private storage */
} drm_buf_t;
#if DRM_DMA_HISTOGRAM
@@ -309,14 +213,14 @@ typedef struct drm_buf {
#define DRM_DMA_HISTOGRAM_NEXT(current) ((current)*10)
typedef struct drm_histogram {
atomic_t total;
-
+
atomic_t queued_to_dispatched[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t dispatched_to_completed[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t completed_to_freed[DRM_DMA_HISTOGRAM_SLOTS];
-
+
atomic_t queued_to_completed[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t queued_to_freed[DRM_DMA_HISTOGRAM_SLOTS];
-
+
atomic_t dma[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t schedule[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t ctx[DRM_DMA_HISTOGRAM_SLOTS];
@@ -332,20 +236,20 @@ typedef struct drm_waitlist {
drm_buf_t **rp; /* Read pointer */
drm_buf_t **wp; /* Write pointer */
drm_buf_t **end; /* End pointer */
- spinlock_t read_lock;
- spinlock_t write_lock;
+ DRM_OS_SPINTYPE read_lock;
+ DRM_OS_SPINTYPE write_lock;
} drm_waitlist_t;
typedef struct drm_freelist {
int initialized; /* Freelist in use */
atomic_t count; /* Number of free buffers */
drm_buf_t *next; /* End pointer */
-
- int waiting; /* Processes waiting on free bufs */
+
+ wait_queue_head_t waiting; /* Processes waiting on free bufs */
int low_mark; /* Low water mark */
int high_mark; /* High water mark */
atomic_t wfh; /* If waiting for high mark */
- struct simplelock lock; /* hope this doesn't need to be linux compatible */
+ DRM_OS_SPINTYPE lock;
} drm_freelist_t;
typedef struct drm_buf_entry {
@@ -365,7 +269,7 @@ typedef struct drm_hw_lock {
} drm_hw_lock_t;
typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t;
-typedef struct drm_file {
+struct drm_file {
TAILQ_ENTRY(drm_file) link;
int authenticated;
int minor;
@@ -375,38 +279,40 @@ typedef struct drm_file {
drm_magic_t magic;
unsigned long ioctl_count;
struct drm_device *devXX;
-} drm_file_t;
-
+};
typedef struct drm_queue {
atomic_t use_count; /* Outstanding uses (+1) */
atomic_t finalization; /* Finalization in progress */
atomic_t block_count; /* Count of processes waiting */
atomic_t block_read; /* Queue blocked for reads */
- int read_queue; /* Processes waiting on block_read */
+ wait_queue_head_t read_queue; /* Processes waiting on block_read */
atomic_t block_write; /* Queue blocked for writes */
- int write_queue; /* Processes waiting on block_write */
+ wait_queue_head_t write_queue; /* Processes waiting on block_write */
+#if 1
atomic_t total_queued; /* Total queued statistic */
atomic_t total_flushed;/* Total flushes statistic */
atomic_t total_locks; /* Total locks statistics */
+#endif
drm_ctx_flags_t flags; /* Context preserving and 2D-only */
drm_waitlist_t waitlist; /* Pending buffers */
- int flush_queue; /* Processes waiting until flush */
+ wait_queue_head_t flush_queue; /* Processes waiting until flush */
} drm_queue_t;
typedef struct drm_lock_data {
drm_hw_lock_t *hw_lock; /* Hardware lock */
pid_t pid; /* PID of lock holder (0=kernel) */
- int lock_queue; /* Queue of blocked processes */
+ wait_queue_head_t lock_queue; /* Queue of blocked processes */
unsigned long lock_time; /* Time of last lock in jiffies */
} drm_lock_data_t;
typedef struct drm_device_dma {
+#if 0
/* Performance Counters */
atomic_t total_prio; /* Total DRM_DMA_PRIORITY */
atomic_t total_bytes; /* Total bytes DMA'd */
atomic_t total_dmas; /* Total DMA buffers dispatched */
-
+
atomic_t total_missed_dma; /* Missed drm_do_dma */
atomic_t total_missed_lock; /* Missed lock in drm_do_dma */
atomic_t total_missed_free; /* Missed drm_free_this_buffer */
@@ -415,27 +321,28 @@ typedef struct drm_device_dma {
atomic_t total_tried; /* Tried next_buffer */
atomic_t total_hit; /* Sent next_buffer */
atomic_t total_lost; /* Lost interrupt */
+#endif
drm_buf_entry_t bufs[DRM_MAX_ORDER+1];
int buf_count;
drm_buf_t **buflist; /* Vector of pointers info bufs */
- int seg_count;
+ int seg_count;
int page_count;
- vm_offset_t *pagelist;
+ unsigned long *pagelist;
unsigned long byte_count;
enum {
- _DRM_DMA_USE_AGP = 0x01
+ _DRM_DMA_USE_AGP = 0x01,
+ _DRM_DMA_USE_SG = 0x02
} flags;
/* DMA support */
drm_buf_t *this_buffer; /* Buffer being sent */
drm_buf_t *next_buffer; /* Selected buffer to send */
drm_queue_t *next_queue; /* Queue from which buffer selected*/
- int waiting; /* Processes waiting on free bufs */
+ wait_queue_head_t waiting; /* Processes waiting on free bufs */
} drm_device_dma_t;
-#ifdef DRM_AGP
-
+#if __REALLY_HAVE_AGP
typedef struct drm_agp_mem {
void *handle;
unsigned long bound; /* address */
@@ -454,27 +361,45 @@ typedef struct drm_agp_head {
int acquired;
unsigned long base;
int agp_mtrr;
+ int cant_use_aperture;
+ unsigned long page_mask;
} drm_agp_head_t;
-
#endif
-typedef struct drm_device {
+typedef struct drm_sg_mem {
+ unsigned long handle;
+ void *virtual;
+ int pages;
+ struct page **pagelist;
+} drm_sg_mem_t;
+
+typedef struct drm_sigdata {
+ int context;
+ drm_hw_lock_t *lock;
+} drm_sigdata_t;
+
+typedef TAILQ_HEAD(drm_map_list, drm_map_list_entry) drm_map_list_t;
+typedef struct drm_map_list_entry {
+ TAILQ_ENTRY(drm_map_list_entry) link;
+ drm_map_t *map;
+} drm_map_list_entry_t;
+
+struct drm_device {
const char *name; /* Simple driver name */
char *unique; /* Unique identifier: e.g., busid */
int unique_len; /* Length of unique field */
device_t device; /* Device instance from newbus */
dev_t devnode; /* Device number for mknod */
char *devname; /* For /proc/interrupts */
-
+
int blocked; /* Blocked due to VC switch? */
int flags; /* Flags to open(2) */
int writable; /* Opened with FWRITE */
struct proc_dir_entry *root; /* Root for this device's entries */
/* Locks */
- struct simplelock count_lock; /* For inuse, open_count, buf_use */
- struct lock dev_lock; /* For others */
-
+ DRM_OS_SPINTYPE count_lock; /* For inuse, open_count, buf_use */
+ struct lock dev_lock; /* For others */
/* Usage Counters */
int open_count; /* Outstanding files open */
atomic_t ioctl_count; /* Outstanding IOCTLs pending */
@@ -482,26 +407,22 @@ typedef struct drm_device {
int buf_use; /* Buffers in use -- cannot alloc */
atomic_t buf_alloc; /* Buffer allocation in progress */
- /* Performance Counters */
- atomic_t total_open;
- atomic_t total_close;
- atomic_t total_ioctl;
- atomic_t total_irq; /* Total interruptions */
- atomic_t total_ctx; /* Total context switches */
-
- atomic_t total_locks;
- atomic_t total_unlocks;
- atomic_t total_contends;
- atomic_t total_sleeps;
+ /* Performance counters */
+ unsigned long counters;
+ drm_stat_type_t types[15];
+ atomic_t counts[15];
/* Authentication */
drm_file_list_t files;
drm_magic_head_t magiclist[DRM_HASH_SIZE];
/* Memory management */
- drm_map_t **maplist; /* Vector of pointers to regions */
+ drm_map_list_t *maplist; /* Linked list of regions */
int map_count; /* Number of mappable regions */
+ drm_map_t **context_sareas;
+ int max_context;
+
drm_vma_entry_t *vmalist; /* List of vmas (for debugging) */
drm_lock_data_t lock; /* Information on hardware lock */
@@ -513,223 +434,199 @@ typedef struct drm_device {
drm_device_dma_t *dma; /* Optional pointer for DMA support */
/* Context support */
- struct resource *irq; /* Interrupt used by board */
+ int irq; /* Interrupt used by board */
+ struct resource *irqr; /* Resource for interrupt used by board */
void *irqh; /* Handle from bus_setup_intr */
- __volatile__ long context_flag; /* Context swapping flag */
- __volatile__ long interrupt_flag;/* Interruption handler flag */
- __volatile__ long dma_flag; /* DMA dispatch flag */
- struct callout timer; /* Timer for delaying ctx switch */
- int context_wait; /* Processes waiting on ctx switch */
+ __volatile__ long context_flag; /* Context swapping flag */
+ __volatile__ long interrupt_flag; /* Interruption handler flag */
+ __volatile__ long dma_flag; /* DMA dispatch flag */
+ struct callout timer; /* Timer for delaying ctx switch */
+ wait_queue_head_t context_wait; /* Processes waiting on ctx switch */
int last_checked; /* Last context checked for DMA */
int last_context; /* Last current context */
- int last_switch; /* Time at last context switch */
+ unsigned long last_switch; /* jiffies at last context switch */
#if __FreeBSD_version >= 400005
- struct task task;
+ struct task task;
#endif
- struct timespec ctx_start;
- struct timespec lck_start;
-#if DRM_DMA_HISTOGRAM
+ cycles_t ctx_start;
+ cycles_t lck_start;
+#if __HAVE_DMA_HISTOGRAM
drm_histogram_t histo;
#endif
-
+
/* Callback to X server for context switch
and for heavy-handed reset. */
char buf[DRM_BSZ]; /* Output buffer */
char *buf_rp; /* Read pointer */
char *buf_wp; /* Write pointer */
char *buf_end; /* End pointer */
- struct sigio *buf_sigio; /* Processes waiting for SIGIO */
+ struct sigio *buf_sigio; /* Processes waiting for SIGIO */
struct selinfo buf_sel; /* Workspace for select/poll */
- int buf_readers; /* Processes waiting to read */
- int buf_writers; /* Processes waiting to ctx switch */
- int buf_selecting; /* True if poll sleeper */
+ int buf_selecting;/* True if poll sleeper */
+ wait_queue_head_t buf_readers; /* Processes waiting to read */
+ wait_queue_head_t buf_writers; /* Processes waiting to ctx switch */
/* Sysctl support */
struct drm_sysctl_info *sysctl;
-#ifdef DRM_AGP
+#if __REALLY_HAVE_AGP
drm_agp_head_t *agp;
#endif
- u_int32_t *ctx_bitmap;
+ struct pci_dev *pdev;
+#ifdef __alpha__
+#if LINUX_VERSION_CODE < 0x020403
+ struct pci_controler *hose;
+#else
+ struct pci_controller *hose;
+#endif
+#endif
+ drm_sg_mem_t *sg; /* Scatter gather memory */
+ unsigned long *ctx_bitmap;
void *dev_private;
-} drm_device_t;
-
-
- /* Internal function definitions */
-
- /* Misc. support (init.c) */
-extern int drm_flags;
-extern void drm_parse_options(char *s);
-
-
- /* Device support (fops.c) */
-extern drm_file_t *drm_find_file_by_proc(drm_device_t *dev, struct proc *p);
-extern int drm_open_helper(dev_t kdev, int flags, int fmt, struct proc *p,
- drm_device_t *dev);
-extern d_close_t drm_close;
-extern d_read_t drm_read;
-extern d_write_t drm_write;
-extern d_poll_t drm_poll;
-extern int drm_fsetown(dev_t kdev, u_long cmd, caddr_t data,
- int flags, struct proc *p);
-extern int drm_fgetown(dev_t kdev, u_long cmd, caddr_t data,
- int flags, struct proc *p);
-extern int drm_write_string(drm_device_t *dev, const char *s);
-
-#if 0
- /* Mapping support (vm.c) */
-extern unsigned long drm_vm_nopage(struct vm_area_struct *vma,
- unsigned long address,
- int write_access);
-extern unsigned long drm_vm_shm_nopage(struct vm_area_struct *vma,
- unsigned long address,
- int write_access);
-extern unsigned long drm_vm_dma_nopage(struct vm_area_struct *vma,
- unsigned long address,
- int write_access);
-extern void drm_vm_open(struct vm_area_struct *vma);
-extern void drm_vm_close(struct vm_area_struct *vma);
-extern int drm_mmap_dma(struct file *filp,
- struct vm_area_struct *vma);
+ drm_sigdata_t sigdata; /* For block_all_signals */
+ sigset_t sigmask;
+};
+
+extern int DRM(flags);
+extern void DRM(parse_options)( char *s );
+extern int DRM(cpu_valid)( void );
+
+ /* Authentication (drm_auth.h) */
+extern int DRM(add_magic)(drm_device_t *dev, drm_file_t *priv,
+ drm_magic_t magic);
+extern int DRM(remove_magic)(drm_device_t *dev, drm_magic_t magic);
+
+ /* Driver support (drm_drv.h) */
+extern int DRM(version)( DRM_OS_IOCTL );
+extern int DRM(write_string)(drm_device_t *dev, const char *s);
+
+ /* Memory management support (drm_memory.h) */
+extern void DRM(mem_init)(void);
+extern void *DRM(alloc)(size_t size, int area);
+extern void *DRM(realloc)(void *oldpt, size_t oldsize, size_t size,
+ int area);
+extern char *DRM(strdup)(const char *s, int area);
+extern void DRM(strfree)(char *s, int area);
+extern void DRM(free)(void *pt, size_t size, int area);
+extern unsigned long DRM(alloc_pages)(int order, int area);
+extern void DRM(free_pages)(unsigned long address, int order,
+ int area);
+extern void *DRM(ioremap)(unsigned long offset, unsigned long size);
+extern void DRM(ioremapfree)(void *pt, unsigned long size);
+
+#if __REALLY_HAVE_AGP
+extern agp_memory *DRM(alloc_agp)(int pages, u32 type);
+extern int DRM(free_agp)(agp_memory *handle, int pages);
+extern int DRM(bind_agp)(agp_memory *handle, unsigned int start);
+extern int DRM(unbind_agp)(agp_memory *handle);
#endif
-extern d_mmap_t drm_mmap;
- /* Proc support (proc.c) */
-extern int drm_sysctl_init(drm_device_t *dev);
-extern int drm_sysctl_cleanup(drm_device_t *dev);
+extern int DRM(context_switch)(drm_device_t *dev, int old, int new);
+extern int DRM(context_switch_complete)(drm_device_t *dev, int new);
- /* Memory management support (memory.c) */
-extern void drm_mem_init(void);
+#if __HAVE_CTX_BITMAP
+extern int DRM(ctxbitmap_init)( drm_device_t *dev );
+extern void DRM(ctxbitmap_cleanup)( drm_device_t *dev );
+extern void DRM(ctxbitmap_free)( drm_device_t *dev, int ctx_handle );
+extern int DRM(ctxbitmap_next)( drm_device_t *dev );
+#endif
-#if __FreeBSD_version < 411000
-#define DRM_SYSCTL_HANDLER_ARGS SYSCTL_HANDLER_ARGS
-#else
-#define DRM_SYSCTL_HANDLER_ARGS (SYSCTL_HANDLER_ARGS)
+ /* Locking IOCTL support (drm_lock.h) */
+extern int DRM(lock_take)(__volatile__ unsigned int *lock,
+ unsigned int context);
+extern int DRM(lock_transfer)(drm_device_t *dev,
+ __volatile__ unsigned int *lock,
+ unsigned int context);
+extern int DRM(lock_free)(drm_device_t *dev,
+ __volatile__ unsigned int *lock,
+ unsigned int context);
+extern int DRM(flush_unblock)(drm_device_t *dev, int context,
+ drm_lock_flags_t flags);
+extern int DRM(flush_block_and_flush)(drm_device_t *dev, int context,
+ drm_lock_flags_t flags);
+extern int DRM(notifier)(void *priv);
+
+ /* Buffer management support (drm_bufs.h) */
+extern int DRM(order)( unsigned long size );
+
+#if __HAVE_DMA
+ /* DMA support (drm_dma.h) */
+extern int DRM(dma_setup)(drm_device_t *dev);
+extern void DRM(dma_takedown)(drm_device_t *dev);
+extern void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf);
+extern void DRM(reclaim_buffers)(drm_device_t *dev, pid_t pid);
+#if __HAVE_OLD_DMA
+/* GH: This is a dirty hack for now...
+ */
+extern void DRM(clear_next_buffer)(drm_device_t *dev);
+extern int DRM(select_queue)(drm_device_t *dev,
+ void (*wrapper)(unsigned long));
+extern int DRM(dma_enqueue)(drm_device_t *dev, drm_dma_t *dma);
+extern int DRM(dma_get_buffers)(drm_device_t *dev, drm_dma_t *dma);
+#endif
+#if __HAVE_DMA_IRQ
+extern int DRM(irq_install)( drm_device_t *dev, int irq );
+extern int DRM(irq_uninstall)( drm_device_t *dev );
+extern void DRM(dma_service)( DRM_OS_IRQ_ARGS );
+#if __HAVE_DMA_IRQ_BH
+extern void DRM(dma_immediate_bh)( DRM_OS_TASKQUEUE_ARGS );
#endif
-extern int drm_mem_info DRM_SYSCTL_HANDLER_ARGS;
-extern void *drm_alloc(size_t size, int area);
-extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size,
- int area);
-extern char *drm_strdup(const char *s, int area);
-extern void drm_strfree(char *s, int area);
-extern void drm_free(void *pt, size_t size, int area);
-extern unsigned long drm_alloc_pages(int order, int area);
-extern void drm_free_pages(unsigned long address, int order,
- int area);
-extern void *drm_ioremap(unsigned long offset, unsigned long size);
-extern void drm_ioremapfree(void *pt, unsigned long size);
-
-#ifdef DRM_AGP
-extern void *drm_alloc_agp(int pages, u_int32_t type);
-extern int drm_free_agp(void *handle, int pages);
-extern int drm_bind_agp(void *handle, unsigned int start);
-extern int drm_unbind_agp(void *handle);
#endif
-
- /* Buffer management support (bufs.c) */
-extern int drm_order(unsigned long size);
-extern d_ioctl_t drm_addmap;
-extern d_ioctl_t drm_addbufs;
-extern d_ioctl_t drm_infobufs;
-extern d_ioctl_t drm_markbufs;
-extern d_ioctl_t drm_freebufs;
-extern d_ioctl_t drm_mapbufs;
-
-
- /* Buffer list management support (lists.c) */
-extern int drm_waitlist_create(drm_waitlist_t *bl, int count);
-extern int drm_waitlist_destroy(drm_waitlist_t *bl);
-extern int drm_waitlist_put(drm_waitlist_t *bl, drm_buf_t *buf);
-extern drm_buf_t *drm_waitlist_get(drm_waitlist_t *bl);
-
-extern int drm_freelist_create(drm_freelist_t *bl, int count);
-extern int drm_freelist_destroy(drm_freelist_t *bl);
-extern int drm_freelist_put(drm_device_t *dev, drm_freelist_t *bl,
- drm_buf_t *buf);
-extern drm_buf_t *drm_freelist_get(drm_freelist_t *bl, int block);
-
- /* DMA support (gen_dma.c) */
-extern void drm_dma_setup(drm_device_t *dev);
-extern void drm_dma_takedown(drm_device_t *dev);
-extern void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf);
-extern void drm_reclaim_buffers(drm_device_t *dev, pid_t pid);
-extern int drm_context_switch(drm_device_t *dev, int old, int new);
-extern int drm_context_switch_complete(drm_device_t *dev, int new);
-extern void drm_wakeup(drm_device_t *dev, drm_buf_t *buf);
-extern void drm_clear_next_buffer(drm_device_t *dev);
-extern int drm_select_queue(drm_device_t *dev,
- void (*wrapper)(void *));
-extern int drm_dma_enqueue(drm_device_t *dev, drm_dma_t *dma);
-extern int drm_dma_get_buffers(drm_device_t *dev, drm_dma_t *dma);
#if DRM_DMA_HISTOGRAM
-extern int drm_histogram_slot(struct timespec *ts);
-extern void drm_histogram_compute(drm_device_t *dev, drm_buf_t *buf);
+extern int DRM(histogram_slot)(unsigned long count);
+extern void DRM(histogram_compute)(drm_device_t *dev, drm_buf_t *buf);
#endif
+ /* Buffer list support (drm_lists.h) */
+#if __HAVE_DMA_WAITLIST
+extern int DRM(waitlist_create)(drm_waitlist_t *bl, int count);
+extern int DRM(waitlist_destroy)(drm_waitlist_t *bl);
+extern int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf);
+extern drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl);
+#endif
+#if __HAVE_DMA_FREELIST
+extern int DRM(freelist_create)(drm_freelist_t *bl, int count);
+extern int DRM(freelist_destroy)(drm_freelist_t *bl);
+extern int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl,
+ drm_buf_t *buf);
+extern drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block);
+#endif
+#endif /* __HAVE_DMA */
+
+#if __REALLY_HAVE_AGP
+ /* AGP/GART support (drm_agpsupport.h) */
+extern drm_agp_head_t *DRM(agp_init)(void);
+extern void DRM(agp_uninit)(void);
+extern void DRM(agp_do_release)(void);
+extern agp_memory *DRM(agp_allocate_memory)(size_t pages, u32 type);
+extern int DRM(agp_free_memory)(agp_memory *handle);
+extern int DRM(agp_bind_memory)(agp_memory *handle, off_t start);
+extern int DRM(agp_unbind_memory)(agp_memory *handle);
+#endif
- /* Misc. IOCTL support (ioctl.c) */
-extern d_ioctl_t drm_irq_busid;
-extern d_ioctl_t drm_getunique;
-extern d_ioctl_t drm_setunique;
-
-
- /* Context IOCTL support (context.c) */
-extern d_ioctl_t drm_resctx;
-extern d_ioctl_t drm_addctx;
-extern d_ioctl_t drm_modctx;
-extern d_ioctl_t drm_getctx;
-extern d_ioctl_t drm_switchctx;
-extern d_ioctl_t drm_newctx;
-extern d_ioctl_t drm_rmctx;
-
-
- /* Drawable IOCTL support (drawable.c) */
-extern d_ioctl_t drm_adddraw;
-extern d_ioctl_t drm_rmdraw;
-
-
- /* Authentication IOCTL support (auth.c) */
-extern int drm_add_magic(drm_device_t *dev, drm_file_t *priv,
- drm_magic_t magic);
-extern int drm_remove_magic(drm_device_t *dev, drm_magic_t magic);
-extern d_ioctl_t drm_getmagic;
-extern d_ioctl_t drm_authmagic;
-
-
- /* Locking IOCTL support (lock.c) */
-extern d_ioctl_t drm_block;
-extern d_ioctl_t drm_unblock;
-extern int drm_lock_take(__volatile__ unsigned int *lock,
- unsigned int context);
-extern int drm_lock_transfer(drm_device_t *dev,
- __volatile__ unsigned int *lock,
- unsigned int context);
-extern int drm_lock_free(drm_device_t *dev,
- __volatile__ unsigned int *lock,
- unsigned int context);
-extern d_ioctl_t drm_finish;
-extern int drm_flush_unblock(drm_device_t *dev, int context,
- drm_lock_flags_t flags);
-extern int drm_flush_block_and_flush(drm_device_t *dev, int context,
- drm_lock_flags_t flags);
-
- /* Context Bitmap support (ctxbitmap.c) */
-extern int drm_ctxbitmap_init(drm_device_t *dev);
-extern void drm_ctxbitmap_cleanup(drm_device_t *dev);
-extern int drm_ctxbitmap_next(drm_device_t *dev);
-extern void drm_ctxbitmap_free(drm_device_t *dev, int ctx_handle);
-
-#ifdef DRM_AGP
- /* AGP/GART support (agpsupport.c) */
-extern drm_agp_head_t *drm_agp_init(void);
-extern d_ioctl_t drm_agp_acquire;
-extern d_ioctl_t drm_agp_release;
-extern d_ioctl_t drm_agp_enable;
-extern d_ioctl_t drm_agp_info;
-extern d_ioctl_t drm_agp_alloc;
-extern d_ioctl_t drm_agp_free;
-extern d_ioctl_t drm_agp_unbind;
-extern d_ioctl_t drm_agp_bind;
+ /* Proc support (drm_proc.h) */
+extern struct proc_dir_entry *DRM(proc_init)(drm_device_t *dev,
+ int minor,
+ struct proc_dir_entry *root,
+ struct proc_dir_entry **dev_root);
+extern int DRM(proc_cleanup)(int minor,
+ struct proc_dir_entry *root,
+ struct proc_dir_entry *dev_root);
+
+#if __HAVE_SG
+ /* Scatter Gather Support (drm_scatter.h) */
+extern void DRM(sg_cleanup)(drm_sg_mem_t *entry);
#endif
+
+#if __REALLY_HAVE_SG
+ /* ATI PCIGART support (ati_pcigart.h) */
+extern int DRM(ati_pcigart_init)(drm_device_t *dev,
+ unsigned long *addr,
+ dma_addr_t *bus_addr);
+extern int DRM(ati_pcigart_cleanup)(drm_device_t *dev,
+ unsigned long addr,
+ dma_addr_t bus_addr);
#endif
+
+#endif /* __KERNEL__ */
#endif
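The rewritten drmP.h is no longer a plain header but a template: the generic code in the drm_*.h files is compiled once per driver, with the __HAVE_* flags selecting which subsystems are built in and the DRM() macro prefixing every symbol with the driver's name. A hedged sketch of how a driver might configure the template before including it (the "foo" names and the exact flag choices are illustrative only):

/* Hypothetical per-driver configuration -- not part of this patch. */
#define DRIVER_NAME		"foo"
#define DRM(x)			foo_##x	/* DRM(alloc) becomes foo_alloc, etc.     */

#define __HAVE_AGP		1	/* build the AGP paths (drm_agpsupport.h) */
#define __HAVE_MTRR		1	/* write-combine the framebuffer mapping  */
#define __HAVE_CTX_BITMAP	1	/* use the generic context allocator      */
#define __HAVE_DMA		1	/* build the DMA buffer management        */
#define __HAVE_DMA_IRQ		0	/* no interrupt-driven dispatch           */

#include "drmP.h"			/* any flag left undefined defaults to 0  */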
diff --git a/bsd/drm_agpsupport.h b/bsd/drm_agpsupport.h
new file mode 100644
index 00000000..ac12c867
--- /dev/null
+++ b/bsd/drm_agpsupport.h
@@ -0,0 +1,326 @@
+/* drm_agpsupport.h -- DRM support for AGP/GART backend -*- linux-c -*-
+ * Created: Mon Dec 13 09:56:45 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#include "drmP.h"
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#if __REALLY_HAVE_AGP
+#include <sys/agpio.h>
+#endif
+
+int DRM(agp_info)(DRM_OS_IOCTL)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ struct agp_info *kern;
+ drm_agp_info_t info;
+
+ if (!dev->agp || !dev->agp->acquired) return EINVAL;
+
+ kern = &dev->agp->info;
+ agp_get_info(dev->agp->agpdev, kern);
+ info.agp_version_major = 1;
+ info.agp_version_minor = 0;
+ info.mode = kern->ai_mode;
+ info.aperture_base = kern->ai_aperture_base;
+ info.aperture_size = kern->ai_aperture_size;
+ info.memory_allowed = kern->ai_memory_allowed;
+ info.memory_used = kern->ai_memory_used;
+ info.id_vendor = kern->ai_devid & 0xffff;
+ info.id_device = kern->ai_devid >> 16;
+
+ *(drm_agp_info_t *) data = info;
+ return 0;
+}
+
+int DRM(agp_acquire)(DRM_OS_IOCTL)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ int retcode;
+
+ if (!dev->agp || dev->agp->acquired) return EINVAL;
+ retcode = agp_acquire(dev->agp->agpdev);
+ if (retcode) return retcode;
+ dev->agp->acquired = 1;
+ return 0;
+}
+
+int DRM(agp_release)(DRM_OS_IOCTL)
+{
+ drm_device_t *dev = kdev->si_drv1;
+
+ if (!dev->agp || !dev->agp->acquired)
+ return EINVAL;
+ agp_release(dev->agp->agpdev);
+ dev->agp->acquired = 0;
+ return 0;
+
+}
+
+void DRM(agp_do_release)(void)
+{
+ device_t agpdev;
+
+ agpdev = agp_find_device();
+ if (agpdev)
+ agp_release(agpdev);
+}
+
+int DRM(agp_enable)(DRM_OS_IOCTL)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_agp_mode_t mode;
+
+ if (!dev->agp || !dev->agp->acquired) return EINVAL;
+
+ mode = *(drm_agp_mode_t *) data;
+
+ dev->agp->mode = mode.mode;
+ agp_enable(dev->agp->agpdev, mode.mode);
+ dev->agp->base = dev->agp->info.ai_aperture_base;
+ dev->agp->enabled = 1;
+ return 0;
+}
+
+int DRM(agp_alloc)(DRM_OS_IOCTL)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_agp_buffer_t request;
+ drm_agp_mem_t *entry;
+ void *handle;
+ unsigned long pages;
+ u_int32_t type;
+ struct agp_memory_info info;
+
+ if (!dev->agp || !dev->agp->acquired) return EINVAL;
+
+ request = *(drm_agp_buffer_t *) data;
+
+ if (!(entry = DRM(alloc)(sizeof(*entry), DRM_MEM_AGPLISTS)))
+ return ENOMEM;
+
+ bzero(entry, sizeof(*entry));
+
+ pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
+ type = (u_int32_t) request.type;
+
+ if (!(handle = DRM(alloc_agp)(pages, type))) {
+ DRM(free)(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+ return ENOMEM;
+ }
+
+ entry->handle = handle;
+ entry->bound = 0;
+ entry->pages = pages;
+ entry->prev = NULL;
+ entry->next = dev->agp->memory;
+ if (dev->agp->memory) dev->agp->memory->prev = entry;
+ dev->agp->memory = entry;
+
+ agp_memory_info(dev->agp->agpdev, entry->handle, &info);
+
+ request.handle = (unsigned long) entry->handle;
+ request.physical = info.ami_physical;
+
+ *(drm_agp_buffer_t *) data = request;
+
+ return 0;
+}
+
+static drm_agp_mem_t * DRM(agp_lookup_entry)(drm_device_t *dev, void *handle)
+{
+ drm_agp_mem_t *entry;
+
+ for (entry = dev->agp->memory; entry; entry = entry->next) {
+ if (entry->handle == handle) return entry;
+ }
+ return NULL;
+}
+
+int DRM(agp_unbind)(DRM_OS_IOCTL)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_agp_binding_t request;
+ drm_agp_mem_t *entry;
+ int retcode;
+
+ if (!dev->agp || !dev->agp->acquired) return EINVAL;
+ request = *(drm_agp_binding_t *) data;
+ if (!(entry = DRM(agp_lookup_entry)(dev, (void *) request.handle)))
+ return EINVAL;
+ if (!entry->bound) return EINVAL;
+ retcode=DRM(unbind_agp)(entry->handle);
+ if (!retcode)
+ {
+ entry->bound=0;
+ return 0;
+ }
+ else
+ return retcode;
+}
+
+int DRM(agp_bind)(DRM_OS_IOCTL)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_agp_binding_t request;
+ drm_agp_mem_t *entry;
+ int retcode;
+ int page;
+
+ DRM_DEBUG("agp_bind, page_size=%x\n", PAGE_SIZE);
+ if (!dev->agp || !dev->agp->acquired)
+ return EINVAL;
+ request = *(drm_agp_binding_t *) data;
+ if (!(entry = DRM(agp_lookup_entry)(dev, (void *) request.handle)))
+ return EINVAL;
+ if (entry->bound) return EINVAL;
+ page = (request.offset + PAGE_SIZE - 1) / PAGE_SIZE;
+ if ((retcode = DRM(bind_agp)(entry->handle, page)))
+ return retcode;
+ entry->bound = dev->agp->base + (page << PAGE_SHIFT);
+ return 0;
+}
+
+int DRM(agp_free)(DRM_OS_IOCTL)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_agp_buffer_t request;
+ drm_agp_mem_t *entry;
+
+ if (!dev->agp || !dev->agp->acquired) return EINVAL;
+ request = *(drm_agp_buffer_t *) data;
+ if (!(entry = DRM(agp_lookup_entry)(dev, (void*) request.handle)))
+ return EINVAL;
+ if (entry->bound) DRM(unbind_agp)(entry->handle);
+
+ if (entry->prev) entry->prev->next = entry->next;
+ else dev->agp->memory = entry->next;
+ if (entry->next) entry->next->prev = entry->prev;
+ DRM(free_agp)(entry->handle, entry->pages);
+ DRM(free)(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+ return 0;
+}
+
+drm_agp_head_t *DRM(agp_init)(void)
+{
+ device_t agpdev;
+ drm_agp_head_t *head = NULL;
+ int agp_available = 1;
+
+ agpdev = agp_find_device();
+ if (!agpdev)
+ agp_available = 0;
+
+ DRM_DEBUG("agp_available = %d\n", agp_available);
+
+ if (agp_available) {
+ if (!(head = DRM(alloc)(sizeof(*head), DRM_MEM_AGPLISTS)))
+ return NULL;
+ bzero((void *)head, sizeof(*head));
+ head->agpdev = agpdev;
+ agp_get_info(agpdev, &head->info);
+ head->memory = NULL;
+#if 0 /* bogus */
+ switch (head->agp_info.chipset) {
+ case INTEL_GENERIC: head->chipset = "Intel"; break;
+ case INTEL_LX: head->chipset = "Intel 440LX"; break;
+ case INTEL_BX: head->chipset = "Intel 440BX"; break;
+ case INTEL_GX: head->chipset = "Intel 440GX"; break;
+ case INTEL_I810: head->chipset = "Intel i810"; break;
+ case VIA_GENERIC: head->chipset = "VIA"; break;
+ case VIA_VP3: head->chipset = "VIA VP3"; break;
+ case VIA_MVP3: head->chipset = "VIA MVP3"; break;
+ case VIA_APOLLO_PRO: head->chipset = "VIA Apollo Pro"; break;
+ case SIS_GENERIC: head->chipset = "SiS"; break;
+ case AMD_GENERIC: head->chipset = "AMD"; break;
+ case AMD_IRONGATE: head->chipset = "AMD Irongate"; break;
+ case ALI_GENERIC: head->chipset = "ALi"; break;
+ case ALI_M1541: head->chipset = "ALi M1541"; break;
+ default:
+ }
+#endif
+ DRM_INFO("AGP at 0x%08x %dMB\n",
+ head->info.ai_aperture_base,
+ head->info.ai_aperture_size >> 20);
+ }
+ return head;
+}
+
+void DRM(agp_uninit)(void)
+{
+/* FIXME: What goes here */
+}
+
+
+agp_memory *DRM(agp_allocate_memory)(size_t pages, u32 type)
+{
+ device_t agpdev;
+
+ agpdev = agp_find_device();
+ if (!agpdev)
+ return NULL;
+
+ return agp_alloc_memory(agpdev, type, pages << AGP_PAGE_SHIFT);
+}
+
+int DRM(agp_free_memory)(agp_memory *handle)
+{
+ device_t agpdev;
+
+ agpdev = agp_find_device();
+ if (!agpdev || !handle)
+ return 0;
+
+ agp_free_memory(agpdev, handle);
+ return 1;
+}
+
+int DRM(agp_bind_memory)(agp_memory *handle, off_t start)
+{
+ device_t agpdev;
+
+ agpdev = agp_find_device();
+ if (!agpdev || !handle)
+ return EINVAL;
+
+ return agp_bind_memory(agpdev, handle, start * PAGE_SIZE);
+}
+
+int DRM(agp_unbind_memory)(agp_memory *handle)
+{
+ device_t agpdev;
+
+ agpdev = agp_find_device();
+ if (!agpdev || !handle)
+ return EINVAL;
+
+ return agp_unbind_memory(agpdev, handle);
+}
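Taken together, the entry points above implement the whole AGP lifecycle: acquire the AGP module, enable it at some mode, allocate memory, and bind that memory at an offset inside the aperture (the bind path rounds the byte offset up to a page and records base + (page << PAGE_SHIFT)). A rough user-space sketch of driving them, with the device path, mode word and sizes as assumptions:

#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include "drm.h"

int setup_agp_buffer(void)
{
	drm_agp_mode_t    mode;
	drm_agp_buffer_t  buf;
	drm_agp_binding_t bind;
	int fd = open("/dev/drm0", O_RDWR);	/* device path is an assumption */

	if (fd < 0)
		return -1;

	ioctl(fd, DRM_IOCTL_AGP_ACQUIRE, 0);	/* take ownership of the AGP device    */

	mode.mode = 1;				/* placeholder mode word (AGP 1x rate) */
	ioctl(fd, DRM_IOCTL_AGP_ENABLE, &mode);

	buf.size = 4 * 1024 * 1024;		/* rounded up to whole pages above     */
	buf.type = 0;
	ioctl(fd, DRM_IOCTL_AGP_ALLOC, &buf);	/* buf.handle identifies the memory    */

	bind.handle = buf.handle;
	bind.offset = 0;			/* byte offset into the AGP aperture   */
	ioctl(fd, DRM_IOCTL_AGP_BIND, &bind);

	/* ... use it, then AGP_UNBIND, AGP_FREE and AGP_RELEASE in reverse order ... */
	close(fd);
	return 0;
}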
diff --git a/bsd/drm_auth.h b/bsd/drm_auth.h
new file mode 100644
index 00000000..f2c2d8da
--- /dev/null
+++ b/bsd/drm_auth.h
@@ -0,0 +1,166 @@
+/* drm_auth.h -- IOCTLs for authentication -*- linux-c -*-
+ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+static int DRM(hash_magic)(drm_magic_t magic)
+{
+ return magic & (DRM_HASH_SIZE-1);
+}
+
+static drm_file_t *DRM(find_file)(drm_device_t *dev, drm_magic_t magic)
+{
+ drm_file_t *retval = NULL;
+ drm_magic_entry_t *pt;
+ int hash = DRM(hash_magic)(magic);
+
+ DRM_OS_LOCK;
+ for (pt = dev->magiclist[hash].head; pt; pt = pt->next) {
+ if (pt->magic == magic) {
+ retval = pt->priv;
+ break;
+ }
+ }
+ DRM_OS_UNLOCK;
+ return retval;
+}
+
+int DRM(add_magic)(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic)
+{
+ int hash;
+ drm_magic_entry_t *entry;
+
+ DRM_DEBUG("%d\n", magic);
+
+ hash = DRM(hash_magic)(magic);
+ entry = (drm_magic_entry_t*) DRM(alloc)(sizeof(*entry), DRM_MEM_MAGIC);
+ if (!entry) DRM_OS_RETURN(ENOMEM);
+ entry->magic = magic;
+ entry->priv = priv;
+ entry->next = NULL;
+
+ DRM_OS_LOCK;
+ if (dev->magiclist[hash].tail) {
+ dev->magiclist[hash].tail->next = entry;
+ dev->magiclist[hash].tail = entry;
+ } else {
+ dev->magiclist[hash].head = entry;
+ dev->magiclist[hash].tail = entry;
+ }
+ DRM_OS_UNLOCK;
+
+ return 0;
+}
+
+int DRM(remove_magic)(drm_device_t *dev, drm_magic_t magic)
+{
+ drm_magic_entry_t *prev = NULL;
+ drm_magic_entry_t *pt;
+ int hash;
+
+ DRM_DEBUG("%d\n", magic);
+ hash = DRM(hash_magic)(magic);
+
+ DRM_OS_LOCK;
+ for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) {
+ if (pt->magic == magic) {
+ if (dev->magiclist[hash].head == pt) {
+ dev->magiclist[hash].head = pt->next;
+ }
+ if (dev->magiclist[hash].tail == pt) {
+ dev->magiclist[hash].tail = prev;
+ }
+ if (prev) {
+ prev->next = pt->next;
+ }
+ DRM_OS_UNLOCK;
+ DRM(free)(pt, sizeof(*pt), DRM_MEM_MAGIC);
+ return 0;
+ }
+ }
+ DRM_OS_UNLOCK;
+
+ DRM(free)(pt, sizeof(*pt), DRM_MEM_MAGIC);
+ DRM_OS_RETURN(EINVAL);
+}
+
+int DRM(getmagic)(DRM_OS_IOCTL)
+{
+ static drm_magic_t sequence = 0;
+ drm_auth_t auth;
+ static DRM_OS_SPINTYPE lock;
+ static int first = 1;
+ DRM_OS_DEVICE;
+ DRM_OS_PRIV;
+
+ if (first) {
+ DRM_OS_SPININIT(lock, "drm getmagic");
+ first = 0;
+ }
+
+ /* Find unique magic */
+ if (priv->magic) {
+ auth.magic = priv->magic;
+ } else {
+ do {
+ DRM_OS_SPINLOCK(&lock);
+ if (!sequence) ++sequence; /* reserve 0 */
+ auth.magic = sequence++;
+ DRM_OS_SPINUNLOCK(&lock);
+ } while (DRM(find_file)(dev, auth.magic));
+ priv->magic = auth.magic;
+ DRM(add_magic)(dev, priv, auth.magic);
+ }
+
+ DRM_DEBUG("%u\n", auth.magic);
+
+ DRM_OS_KRNTOUSR((drm_auth_t *)data, auth, sizeof(auth));
+
+ return 0;
+}
+
+int DRM(authmagic)(DRM_OS_IOCTL)
+{
+ drm_auth_t auth;
+ drm_file_t *file;
+ DRM_OS_DEVICE;
+
+ DRM_OS_KRNFROMUSR(auth, (drm_auth_t *)data, sizeof(auth));
+
+ DRM_DEBUG("%u\n", auth.magic);
+ if ((file = DRM(find_file)(dev, auth.magic))) {
+ file->authenticated = 1;
+ DRM(remove_magic)(dev, auth.magic);
+ return 0;
+ }
+ DRM_OS_RETURN(EINVAL);
+}
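Between them, DRM(getmagic) and DRM(authmagic) implement the DRI authentication handshake: every opener gets a unique magic cookie, hands it to the X server out of band (normally over the DRI protocol), and the X server calls DRM(authmagic) on its own privileged descriptor so the kernel can flip the client's authenticated flag. A sketch of the two halves, with the transport between them left out:

#include <sys/ioctl.h>
#include "drm.h"

/* Client side: fetch the cookie for this open of the DRM device. */
drm_magic_t client_get_magic(int client_fd)
{
	drm_auth_t auth;

	ioctl(client_fd, DRM_IOCTL_GET_MAGIC, &auth);
	return auth.magic;		/* forwarded to the X server somehow */
}

/* Server side: authenticate the client that presented this cookie. */
int server_auth_magic(int server_fd, drm_magic_t magic)
{
	drm_auth_t auth;

	auth.magic = magic;
	return ioctl(server_fd, DRM_IOCTL_AUTH_MAGIC, &auth);
}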
diff --git a/bsd/drm_bufs.h b/bsd/drm_bufs.h
new file mode 100644
index 00000000..d55b36d8
--- /dev/null
+++ b/bsd/drm_bufs.h
@@ -0,0 +1,1092 @@
+/* drm_bufs.h -- Generic buffer template -*- linux-c -*-
+ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#define __NO_VERSION__
+#include <machine/param.h>
+#include <sys/mman.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_map.h>
+#include <vm/vm_param.h>
+#include "drmP.h"
+
+#ifndef __HAVE_PCI_DMA
+#define __HAVE_PCI_DMA 0
+#endif
+
+#ifndef __HAVE_SG
+#define __HAVE_SG 0
+#endif
+
+#ifndef DRIVER_BUF_PRIV_T
+#define DRIVER_BUF_PRIV_T u32
+#endif
+#ifndef DRIVER_AGP_BUFFERS_MAP
+#if __HAVE_AGP && __HAVE_DMA
+#error "You must define DRIVER_AGP_BUFFERS_MAP()"
+#else
+#define DRIVER_AGP_BUFFERS_MAP( dev ) NULL
+#endif
+#endif
+
+/*
+ * Compute the order (base-2 logarithm of size, rounded up). Can be made faster.
+ */
+int DRM(order)( unsigned long size )
+{
+ int order;
+ unsigned long tmp;
+
+ for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );
+
+ if ( size & ~(1 << order) )
+ ++order;
+
+ return order;
+}
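DRM(order)() returns the smallest n with 2^n >= size; DRM(addmap) below uses it to turn the requested SAREA size into a page-allocation order via DRM(order)(map->size) - PAGE_SHIFT. A few illustrative values (not part of the patch):

/* Illustrative only:
 *   DRM(order)(1)    == 0    (2^0  = 1)
 *   DRM(order)(4096) == 12   (2^12 = 4096, one x86 page)
 *   DRM(order)(4097) == 13   (rounded up to the next power of two)
 */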
+
+int DRM(addmap)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_map_t *map;
+ drm_map_list_entry_t *list;
+
+ if (!(dev->flags & (FREAD|FWRITE)))
+ DRM_OS_RETURN(EACCES); /* Require read/write */
+
+ map = (drm_map_t *) DRM(alloc)( sizeof(*map), DRM_MEM_MAPS );
+ if ( !map )
+ DRM_OS_RETURN(ENOMEM);
+
+ *map = *(drm_map_t *)data;
+
+	/* Only allow shared memory to be removable, since we only keep enough
+	 * bookkeeping information about shared memory to allow for removal
+	 * when processes fork.
+	 */
+ if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
+ DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
+ DRM_OS_RETURN(EINVAL);
+ }
+ DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
+ map->offset, map->size, map->type );
+ if ( (map->offset & PAGE_MASK) || (map->size & PAGE_MASK) ) {
+ DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
+ DRM_OS_RETURN(EINVAL);
+ }
+ map->mtrr = -1;
+ map->handle = 0;
+
+ TAILQ_FOREACH(list, dev->maplist, link) {
+ drm_map_t *entry = list->map;
+ if ( (entry->offset >= map->offset
+ && (entry->offset) < (map->offset + map->size) )
+ || ((entry->offset + entry->size) >= map->offset
+ && (entry->offset + entry->size) < (map->offset + map->size) )
+ || ((entry->offset < map->offset)
+ && (entry->offset + entry->size) >= (map->offset + map->size) ) )
+			DRM_DEBUG("map collision: current(0x%lx-0x%lx), add(0x%lx-0x%lx)\n",
+				  entry->offset, entry->offset + entry->size - 1,
+				  map->offset, map->offset + map->size - 1);
+ }
+
+ switch ( map->type ) {
+ case _DRM_REGISTERS:
+ case _DRM_FRAME_BUFFER:
+#if !defined(__sparc__) && !defined(__alpha__)
+		if ( map->offset + map->size < map->offset ) {
+ DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
+ DRM_OS_RETURN(EINVAL);
+ }
+#endif
+#ifdef __alpha__
+ map->offset += dev->hose->mem_space->start;
+#endif
+#if __REALLY_HAVE_MTRR
+ if ( map->type == _DRM_FRAME_BUFFER ||
+ (map->flags & _DRM_WRITE_COMBINING) ) {
+ map->mtrr = mtrr_add( map->offset, map->size,
+ MTRR_TYPE_WRCOMB, 1 );
+ }
+#endif
+ map->handle = DRM(ioremap)( map->offset, map->size );
+ break;
+
+ case _DRM_SHM:
+ DRM_INFO( "%ld %d %d\n",
+ map->size, DRM(order)( map->size ), PAGE_SHIFT);
+ map->handle = (void *)DRM(alloc_pages)
+ (DRM(order)(map->size) - PAGE_SHIFT, DRM_MEM_SAREA);
+ DRM_DEBUG( "%ld %d %p\n",
+ map->size, DRM(order)( map->size ), map->handle );
+ if ( !map->handle ) {
+ DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
+ DRM_OS_RETURN(ENOMEM);
+ }
+ map->offset = (unsigned long)map->handle;
+ if ( map->flags & _DRM_CONTAINS_LOCK ) {
+ dev->lock.hw_lock = map->handle; /* Pointer to lock */
+ }
+ break;
+#if __REALLY_HAVE_AGP
+ case _DRM_AGP:
+#ifdef __alpha__
+ map->offset += dev->hose->mem_space->start;
+#endif
+ map->offset += dev->agp->base;
+ map->mtrr = dev->agp->agp_mtrr; /* for getmap */
+ break;
+#endif
+ case _DRM_SCATTER_GATHER:
+ if (!dev->sg) {
+ DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
+ DRM_OS_RETURN(EINVAL);
+ }
+ map->offset = map->offset + dev->sg->handle;
+ break;
+
+ default:
+ DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ list = DRM(alloc)(sizeof(*list), DRM_MEM_MAPS);
+ if(!list) {
+ DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
+ DRM_OS_RETURN(EINVAL);
+ }
+ memset(list, 0, sizeof(*list));
+ list->map = map;
+
+ DRM_OS_LOCK;
+ TAILQ_INSERT_TAIL(dev->maplist, list, link);
+ DRM_OS_UNLOCK;
+
+ *(drm_map_t *)data = *map;
+
+ if ( map->type != _DRM_SHM ) {
+ ((drm_map_t *)data)->handle = (void *)map->offset;
+ }
+ return 0;
+}
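+
+/* Rough userspace sketch of driving this ioctl (assumes the standard
+ * DRM_IOCTL_ADD_MAP request from drm.h; fb_base/fb_size are placeholders
+ * for a driver-specific, page-aligned frame buffer range):
+ *
+ *	drm_map_t map;
+ *	memset(&map, 0, sizeof(map));
+ *	map.offset = fb_base;
+ *	map.size   = fb_size;
+ *	map.type   = _DRM_FRAME_BUFFER;
+ *	map.flags  = 0;
+ *	ioctl(fd, DRM_IOCTL_ADD_MAP, &map);
+ *	... map.handle then holds the token handed to mmap(2) ...
+ */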
+
+
+/* Remove a map from the map list and deallocate its resources if the
+ * mapping isn't in use.
+ */
+
+int DRM(rmmap)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_map_list_entry_t *list;
+ drm_map_t *map;
+ drm_map_t request;
+ int found_maps = 0;
+
+ DRM_OS_KRNFROMUSR( request, (drm_map_t *)data, sizeof(request) );
+
+ DRM_OS_LOCK;
+ TAILQ_FOREACH(list, dev->maplist, link) {
+ map = list->map;
+ if(map->handle == request.handle &&
+ map->flags & _DRM_REMOVABLE) break;
+ }
+
+	/* If the loop ran to completion, list is NULL: either the map list is
+	 * empty or no matching removable map was found.
+	 */
+ if(list == NULL) {
+ DRM_OS_UNLOCK;
+ DRM_OS_RETURN(EINVAL);
+ }
+ TAILQ_REMOVE(dev->maplist, list, link);
+ DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);
+
+
+ if(!found_maps) {
+ switch (map->type) {
+ case _DRM_REGISTERS:
+ case _DRM_FRAME_BUFFER:
+#if __REALLY_HAVE_MTRR
+ if (map->mtrr >= 0) {
+ int retcode;
+ retcode = mtrr_del(map->mtrr,
+ map->offset,
+ map->size);
+ DRM_DEBUG("mtrr_del = %d\n", retcode);
+ }
+#endif
+ DRM(ioremapfree)(map->handle, map->size);
+ break;
+ case _DRM_SHM:
+ DRM(free_pages)( (unsigned long)map->handle, DRM(order)(map->size), DRM_MEM_SAREA );
+ break;
+ case _DRM_AGP:
+ case _DRM_SCATTER_GATHER:
+ break;
+ }
+ DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
+ }
+ DRM_OS_UNLOCK;
+ return 0;
+}
+
+#if __HAVE_DMA
+
+
+static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
+{
+ int i;
+
+ if (entry->seg_count) {
+ for (i = 0; i < entry->seg_count; i++) {
+ DRM(free_pages)(entry->seglist[i],
+ entry->page_order,
+ DRM_MEM_DMA);
+ }
+ DRM(free)(entry->seglist,
+ entry->seg_count *
+ sizeof(*entry->seglist),
+ DRM_MEM_SEGS);
+
+ entry->seg_count = 0;
+ }
+
+ if(entry->buf_count) {
+ for(i = 0; i < entry->buf_count; i++) {
+ if(entry->buflist[i].dev_private) {
+ DRM(free)(entry->buflist[i].dev_private,
+ entry->buflist[i].dev_priv_size,
+ DRM_MEM_BUFS);
+ }
+ }
+ DRM(free)(entry->buflist,
+ entry->buf_count *
+ sizeof(*entry->buflist),
+ DRM_MEM_BUFS);
+
+#if __HAVE_DMA_FREELIST
+ DRM(freelist_destroy)(&entry->freelist);
+#endif
+
+ entry->buf_count = 0;
+ }
+}
+
+#if __REALLY_HAVE_AGP
+int DRM(addbufs_agp)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t request;
+ drm_buf_entry_t *entry;
+ drm_buf_t *buf;
+ unsigned long offset;
+ unsigned long agp_offset;
+ int count;
+ int order;
+ int size;
+ int alignment;
+ int page_order;
+ int total;
+ int byte_count;
+ int i;
+ drm_buf_t **temp_buflist;
+
+ if ( !dma ) DRM_OS_RETURN(EINVAL);
+
+ DRM_OS_KRNFROMUSR( request, (drm_buf_desc_t *)data, sizeof(request) );
+
+ count = request.count;
+ order = DRM(order)( request.size );
+ size = 1 << order;
+
+ alignment = (request.flags & _DRM_PAGE_ALIGN)
+ ? round_page(size) : size;
+ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
+
+ byte_count = 0;
+ agp_offset = dev->agp->base + request.agp_start;
+
+ DRM_DEBUG( "count: %d\n", count );
+ DRM_DEBUG( "order: %d\n", order );
+ DRM_DEBUG( "size: %d\n", size );
+ DRM_DEBUG( "agp_offset: 0x%lx\n", agp_offset );
+ DRM_DEBUG( "alignment: %d\n", alignment );
+ DRM_DEBUG( "page_order: %d\n", page_order );
+ DRM_DEBUG( "total: %d\n", total );
+
+ if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
+ DRM_OS_RETURN(EINVAL);
+ if ( dev->queue_count )
+ DRM_OS_RETURN(EBUSY); /* Not while in use */
+
+ DRM_OS_SPINLOCK( &dev->count_lock );
+ if ( dev->buf_use ) {
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+ DRM_OS_RETURN(EBUSY);
+ }
+ atomic_inc( &dev->buf_alloc );
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+
+ DRM_OS_LOCK;
+ entry = &dma->bufs[order];
+ if ( entry->buf_count ) {
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(ENOMEM); /* May only call once for each order */
+ }
+
+ if (count < 0 || count > 4096) {
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS );
+ if ( !entry->buflist ) {
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(ENOMEM);
+ }
+ memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
+
+ entry->buf_size = size;
+ entry->page_order = page_order;
+
+ offset = 0;
+
+ while ( entry->buf_count < count ) {
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
+
+ buf->offset = (dma->byte_count + offset);
+ buf->bus_address = agp_offset + offset;
+ buf->address = (void *)(agp_offset + offset);
+ buf->next = NULL;
+ buf->waiting = 0;
+ buf->pending = 0;
+ buf->dma_wait = 0;
+ buf->pid = 0;
+
+ buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
+ buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
+ DRM_MEM_BUFS );
+		if(!buf->dev_private) {
+			/* Set count correctly so we free the proper amount. */
+			entry->buf_count = count;
+			DRM(cleanup_buf_error)(entry);
+			DRM_OS_UNLOCK;
+			atomic_dec( &dev->buf_alloc );
+			DRM_OS_RETURN(ENOMEM);
+		}
+ memset( buf->dev_private, 0, buf->dev_priv_size );
+
+#if __HAVE_DMA_HISTOGRAM
+ buf->time_queued = 0;
+ buf->time_dispatched = 0;
+ buf->time_completed = 0;
+ buf->time_freed = 0;
+#endif
+
+ offset += alignment;
+ entry->buf_count++;
+ byte_count += PAGE_SIZE << page_order;
+ }
+
+ DRM_DEBUG( "byte_count: %d\n", byte_count );
+
+ temp_buflist = DRM(realloc)( dma->buflist,
+ dma->buf_count * sizeof(*dma->buflist),
+ (dma->buf_count + entry->buf_count)
+ * sizeof(*dma->buflist),
+ DRM_MEM_BUFS );
+ if(!temp_buflist) {
+ /* Free the entry because it isn't valid */
+ DRM(cleanup_buf_error)(entry);
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(ENOMEM);
+ }
+ dma->buflist = temp_buflist;
+
+ for ( i = 0 ; i < entry->buf_count ; i++ ) {
+ dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+ }
+
+ dma->buf_count += entry->buf_count;
+ dma->byte_count += byte_count;
+
+ DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
+ DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
+
+#if __HAVE_DMA_FREELIST
+ DRM(freelist_create)( &entry->freelist, entry->buf_count );
+ for ( i = 0 ; i < entry->buf_count ; i++ ) {
+ DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
+ }
+#endif
+ DRM_OS_UNLOCK;
+
+ request.count = entry->buf_count;
+ request.size = size;
+
+ DRM_OS_KRNTOUSR( (drm_buf_desc_t *)data, request, sizeof(request) );
+
+ dma->flags = _DRM_DMA_USE_AGP;
+
+ atomic_dec( &dev->buf_alloc );
+ return 0;
+}
+#endif /* __REALLY_HAVE_AGP */
+
+#if __HAVE_PCI_DMA
+int DRM(addbufs_pci)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t request;
+ int count;
+ int order;
+ int size;
+ int total;
+ int page_order;
+ drm_buf_entry_t *entry;
+ unsigned long page;
+ drm_buf_t *buf;
+ int alignment;
+ unsigned long offset;
+ int i;
+ int byte_count;
+ int page_count;
+ unsigned long *temp_pagelist;
+ drm_buf_t **temp_buflist;
+
+ if ( !dma ) DRM_OS_RETURN(EINVAL);
+
+ DRM_OS_KRNFROMUSR( request, (drm_buf_desc_t *)data, sizeof(request) );
+
+ count = request.count;
+ order = DRM(order)( request.size );
+ size = 1 << order;
+
+ DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
+ request.count, request.size, size,
+ order, dev->queue_count );
+
+ if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
+ DRM_OS_RETURN(EINVAL);
+ if ( dev->queue_count )
+ DRM_OS_RETURN(EBUSY); /* Not while in use */
+
+ alignment = (request.flags & _DRM_PAGE_ALIGN)
+ ? round_page(size) : size;
+ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
+
+ DRM_OS_SPINLOCK( &dev->count_lock );
+ if ( dev->buf_use ) {
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+ DRM_OS_RETURN(EBUSY);
+ }
+ atomic_inc( &dev->buf_alloc );
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+
+ DRM_OS_LOCK;
+ entry = &dma->bufs[order];
+ if ( entry->buf_count ) {
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(ENOMEM); /* May only call once for each order */
+ }
+
+ if (count < 0 || count > 4096) {
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS );
+ if ( !entry->buflist ) {
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(ENOMEM);
+ }
+ memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
+
+ entry->seglist = DRM(alloc)( count * sizeof(*entry->seglist),
+ DRM_MEM_SEGS );
+ if ( !entry->seglist ) {
+ DRM(free)( entry->buflist,
+ count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS );
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(ENOMEM);
+ }
+ memset( entry->seglist, 0, count * sizeof(*entry->seglist) );
+
+ temp_pagelist = DRM(realloc)( dma->pagelist,
+ dma->page_count * sizeof(*dma->pagelist),
+ (dma->page_count + (count << page_order))
+ * sizeof(*dma->pagelist),
+ DRM_MEM_PAGES );
+ if(!temp_pagelist) {
+ DRM(free)( entry->buflist,
+ count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS );
+ DRM(free)( entry->seglist,
+ count * sizeof(*entry->seglist),
+ DRM_MEM_SEGS );
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(ENOMEM);
+ }
+
+ dma->pagelist = temp_pagelist;
+ DRM_DEBUG( "pagelist: %d entries\n",
+ dma->page_count + (count << page_order) );
+
+ entry->buf_size = size;
+ entry->page_order = page_order;
+ byte_count = 0;
+ page_count = 0;
+
+ while ( entry->buf_count < count ) {
+ page = DRM(alloc_pages)( page_order, DRM_MEM_DMA );
+ if ( !page ) break;
+ entry->seglist[entry->seg_count++] = page;
+ for ( i = 0 ; i < (1 << page_order) ; i++ ) {
+ DRM_DEBUG( "page %d @ 0x%08lx\n",
+ dma->page_count + page_count,
+ page + PAGE_SIZE * i );
+ dma->pagelist[dma->page_count + page_count++]
+ = page + PAGE_SIZE * i;
+ }
+ for ( offset = 0 ;
+ offset + size <= total && entry->buf_count < count ;
+ offset += alignment, ++entry->buf_count ) {
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
+ buf->offset = (dma->byte_count + byte_count + offset);
+ buf->address = (void *)(page + offset);
+ buf->next = NULL;
+ buf->waiting = 0;
+ buf->pending = 0;
+ buf->dma_wait = 0;
+ buf->pid = 0;
+#if __HAVE_DMA_HISTOGRAM
+ buf->time_queued = 0;
+ buf->time_dispatched = 0;
+ buf->time_completed = 0;
+ buf->time_freed = 0;
+#endif
+ DRM_DEBUG( "buffer %d @ %p\n",
+ entry->buf_count, buf->address );
+ }
+ byte_count += PAGE_SIZE << page_order;
+ }
+
+ temp_buflist = DRM(realloc)( dma->buflist,
+ dma->buf_count * sizeof(*dma->buflist),
+ (dma->buf_count + entry->buf_count)
+ * sizeof(*dma->buflist),
+ DRM_MEM_BUFS );
+ if(!temp_buflist) {
+ /* Free the entry because it isn't valid */
+ DRM(cleanup_buf_error)(entry);
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(ENOMEM);
+ }
+ dma->buflist = temp_buflist;
+
+ for ( i = 0 ; i < entry->buf_count ; i++ ) {
+ dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+ }
+
+ dma->buf_count += entry->buf_count;
+ dma->seg_count += entry->seg_count;
+ dma->page_count += entry->seg_count << page_order;
+ dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
+
+#if __HAVE_DMA_FREELIST
+ DRM(freelist_create)( &entry->freelist, entry->buf_count );
+ for ( i = 0 ; i < entry->buf_count ; i++ ) {
+ DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
+ }
+#endif
+ DRM_OS_UNLOCK;
+
+ request.count = entry->buf_count;
+ request.size = size;
+
+ DRM_OS_KRNTOUSR( (drm_buf_desc_t *)data, request, sizeof(request) );
+
+ atomic_dec( &dev->buf_alloc );
+ return 0;
+
+}
+#endif /* __HAVE_PCI_DMA */
+
+#if __REALLY_HAVE_SG
+int DRM(addbufs_sg)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t request;
+ drm_buf_entry_t *entry;
+ drm_buf_t *buf;
+ unsigned long offset;
+ unsigned long agp_offset;
+ int count;
+ int order;
+ int size;
+ int alignment;
+ int page_order;
+ int total;
+ int byte_count;
+ int i;
+ drm_buf_t **temp_buflist;
+
+ if ( !dma ) DRM_OS_RETURN(EINVAL);
+
+ DRM_OS_KRNFROMUSR( request, (drm_buf_desc_t *)data, sizeof(request) );
+
+ count = request.count;
+ order = DRM(order)( request.size );
+ size = 1 << order;
+
+ alignment = (request.flags & _DRM_PAGE_ALIGN)
+ ? round_page(size) : size;
+ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
+
+ byte_count = 0;
+ agp_offset = request.agp_start;
+
+ DRM_DEBUG( "count: %d\n", count );
+ DRM_DEBUG( "order: %d\n", order );
+ DRM_DEBUG( "size: %d\n", size );
+ DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
+ DRM_DEBUG( "alignment: %d\n", alignment );
+ DRM_DEBUG( "page_order: %d\n", page_order );
+ DRM_DEBUG( "total: %d\n", total );
+
+ if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
+ DRM_OS_RETURN(EINVAL);
+ if ( dev->queue_count ) DRM_OS_RETURN(EBUSY); /* Not while in use */
+
+ DRM_OS_SPINLOCK( &dev->count_lock );
+ if ( dev->buf_use ) {
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+ DRM_OS_RETURN(EBUSY);
+ }
+ atomic_inc( &dev->buf_alloc );
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+
+ DRM_OS_LOCK;
+ entry = &dma->bufs[order];
+ if ( entry->buf_count ) {
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(ENOMEM); /* May only call once for each order */
+ }
+
+ if (count < 0 || count > 4096) {
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS );
+ if ( !entry->buflist ) {
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(ENOMEM);
+ }
+ memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
+
+ entry->buf_size = size;
+ entry->page_order = page_order;
+
+ offset = 0;
+
+ while ( entry->buf_count < count ) {
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
+
+ buf->offset = (dma->byte_count + offset);
+ buf->bus_address = agp_offset + offset;
+ buf->address = (void *)(agp_offset + offset + dev->sg->handle);
+ buf->next = NULL;
+ buf->waiting = 0;
+ buf->pending = 0;
+ buf->dma_wait = 0;
+ buf->pid = 0;
+
+ buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
+ buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
+ DRM_MEM_BUFS );
+ if(!buf->dev_private) {
+ /* Set count correctly so we free the proper amount. */
+ entry->buf_count = count;
+ DRM(cleanup_buf_error)(entry);
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(ENOMEM);
+ }
+
+ memset( buf->dev_private, 0, buf->dev_priv_size );
+
+# if __HAVE_DMA_HISTOGRAM
+ buf->time_queued = 0;
+ buf->time_dispatched = 0;
+ buf->time_completed = 0;
+ buf->time_freed = 0;
+# endif
+ DRM_DEBUG( "buffer %d @ %p\n",
+ entry->buf_count, buf->address );
+
+ offset += alignment;
+ entry->buf_count++;
+ byte_count += PAGE_SIZE << page_order;
+ }
+
+ DRM_DEBUG( "byte_count: %d\n", byte_count );
+
+ temp_buflist = DRM(realloc)( dma->buflist,
+ dma->buf_count * sizeof(*dma->buflist),
+ (dma->buf_count + entry->buf_count)
+ * sizeof(*dma->buflist),
+ DRM_MEM_BUFS );
+ if(!temp_buflist) {
+ /* Free the entry because it isn't valid */
+ DRM(cleanup_buf_error)(entry);
+ DRM_OS_UNLOCK;
+ atomic_dec( &dev->buf_alloc );
+ DRM_OS_RETURN(ENOMEM);
+ }
+ dma->buflist = temp_buflist;
+
+ for ( i = 0 ; i < entry->buf_count ; i++ ) {
+ dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+ }
+
+ dma->buf_count += entry->buf_count;
+ dma->byte_count += byte_count;
+
+ DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
+ DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
+
+#if __HAVE_DMA_FREELIST
+ DRM(freelist_create)( &entry->freelist, entry->buf_count );
+ for ( i = 0 ; i < entry->buf_count ; i++ ) {
+ DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
+ }
+#endif
+ DRM_OS_UNLOCK;
+
+ request.count = entry->buf_count;
+ request.size = size;
+
+ DRM_OS_KRNTOUSR( (drm_buf_desc_t *)data, request, sizeof(request) );
+
+ dma->flags = _DRM_DMA_USE_SG;
+
+ atomic_dec( &dev->buf_alloc );
+ return 0;
+}
+#endif /* __REALLY_HAVE_SG */
+
+int DRM(addbufs)( DRM_OS_IOCTL )
+{
+ drm_buf_desc_t request;
+
+ DRM_OS_KRNFROMUSR( request, (drm_buf_desc_t *)data, sizeof(request) );
+
+#if __REALLY_HAVE_AGP
+ if ( request.flags & _DRM_AGP_BUFFER )
+ return DRM(addbufs_agp)( kdev, cmd, data, flags, p );
+ else
+#endif
+#if __REALLY_HAVE_SG
+ if ( request.flags & _DRM_SG_BUFFER )
+ return DRM(addbufs_sg)( kdev, cmd, data, flags, p );
+ else
+#endif
+#if __HAVE_PCI_DMA
+ return DRM(addbufs_pci)( kdev, cmd, data, flags, p );
+#else
+ DRM_OS_RETURN(EINVAL);
+#endif
+}
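+
+/* Rough userspace sketch (assumes the standard DRM_IOCTL_ADD_BUFS request
+ * from drm.h; the count/size values below are purely illustrative):
+ *
+ *	drm_buf_desc_t req;
+ *	memset(&req, 0, sizeof(req));
+ *	req.count     = 64;
+ *	req.size      = 65536;
+ *	req.flags     = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
+ *	req.agp_start = agp_buffer_offset;   ... driver-specific AGP offset ...
+ *	ioctl(fd, DRM_IOCTL_ADD_BUFS, &req);
+ *	... on return, req.count and req.size reflect what was allocated ...
+ */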
+
+int DRM(infobufs)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_info_t request;
+ int i;
+ int count;
+
+ if ( !dma ) DRM_OS_RETURN(EINVAL);
+
+ DRM_OS_SPINLOCK( &dev->count_lock );
+ if ( atomic_read( &dev->buf_alloc ) ) {
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+ DRM_OS_RETURN(EBUSY);
+ }
+ ++dev->buf_use; /* Can't allocate more after this call */
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+
+ DRM_OS_KRNFROMUSR( request, (drm_buf_info_t *)data, sizeof(request) );
+
+ for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
+ if ( dma->bufs[i].buf_count ) ++count;
+ }
+
+ DRM_DEBUG( "count = %d\n", count );
+
+ if ( request.count >= count ) {
+ for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
+ if ( dma->bufs[i].buf_count ) {
+ drm_buf_desc_t *to = &request.list[count];
+ drm_buf_entry_t *from = &dma->bufs[i];
+ drm_freelist_t *list = &dma->bufs[i].freelist;
+ if ( DRM_OS_COPYTOUSR( &to->count,
+ &from->buf_count,
+ sizeof(from->buf_count) ) ||
+ DRM_OS_COPYTOUSR( &to->size,
+ &from->buf_size,
+ sizeof(from->buf_size) ) ||
+ DRM_OS_COPYTOUSR( &to->low_mark,
+ &list->low_mark,
+ sizeof(list->low_mark) ) ||
+ DRM_OS_COPYTOUSR( &to->high_mark,
+ &list->high_mark,
+ sizeof(list->high_mark) ) )
+ DRM_OS_RETURN(EFAULT);
+
+ DRM_DEBUG( "%d %d %d %d %d\n",
+ i,
+ dma->bufs[i].buf_count,
+ dma->bufs[i].buf_size,
+ dma->bufs[i].freelist.low_mark,
+ dma->bufs[i].freelist.high_mark );
+ ++count;
+ }
+ }
+ }
+ request.count = count;
+
+ DRM_OS_KRNTOUSR( (drm_buf_info_t *)data, request, sizeof(request) );
+
+ return 0;
+}
+
+int DRM(markbufs)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t request;
+ int order;
+ drm_buf_entry_t *entry;
+
+ if ( !dma ) DRM_OS_RETURN(EINVAL);
+
+ DRM_OS_KRNFROMUSR( request, (drm_buf_desc_t *)data, sizeof(request) );
+
+ DRM_DEBUG( "%d, %d, %d\n",
+ request.size, request.low_mark, request.high_mark );
+ order = DRM(order)( request.size );
+ if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
+ DRM_OS_RETURN(EINVAL);
+ entry = &dma->bufs[order];
+
+ if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
+ DRM_OS_RETURN(EINVAL);
+ if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
+ DRM_OS_RETURN(EINVAL);
+
+ entry->freelist.low_mark = request.low_mark;
+ entry->freelist.high_mark = request.high_mark;
+
+ return 0;
+}
+
+int DRM(freebufs)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_free_t request;
+ int i;
+ int idx;
+ drm_buf_t *buf;
+
+ if ( !dma ) DRM_OS_RETURN(EINVAL);
+
+ DRM_OS_KRNFROMUSR( request, (drm_buf_free_t *)data, sizeof(request) );
+
+ DRM_DEBUG( "%d\n", request.count );
+ for ( i = 0 ; i < request.count ; i++ ) {
+ if ( DRM_OS_COPYFROMUSR( &idx,
+ &request.list[i],
+ sizeof(idx) ) )
+ DRM_OS_RETURN(EFAULT);
+ if ( idx < 0 || idx >= dma->buf_count ) {
+ DRM_ERROR( "Index %d (of %d max)\n",
+ idx, dma->buf_count - 1 );
+ DRM_OS_RETURN(EINVAL);
+ }
+ buf = dma->buflist[idx];
+ if ( buf->pid != DRM_OS_CURRENTPID ) {
+ DRM_ERROR( "Process %d freeing buffer owned by %d\n",
+ DRM_OS_CURRENTPID, buf->pid );
+ DRM_OS_RETURN(EINVAL);
+ }
+ DRM(free_buffer)( dev, buf );
+ }
+
+ return 0;
+}
+
+int DRM(mapbufs)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_device_dma_t *dma = dev->dma;
+ int retcode = 0;
+ const int zero = 0;
+ vm_offset_t virtual, address;
+#if __FreeBSD_version >= 500000
+ struct vmspace *vms = p->td_proc->p_vmspace;
+#else
+ struct vmspace *vms = p->p_vmspace;
+#endif
+ drm_buf_map_t request;
+ int i;
+
+ if ( !dma ) DRM_OS_RETURN(EINVAL);
+
+ DRM_OS_SPINLOCK( &dev->count_lock );
+ if ( atomic_read( &dev->buf_alloc ) ) {
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+ DRM_OS_RETURN(EBUSY);
+ }
+ dev->buf_use++; /* Can't allocate more after this call */
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+
+ DRM_OS_KRNFROMUSR( request, (drm_buf_map_t *)data, sizeof(request) );
+
+ if ( request.count >= dma->buf_count ) {
+ if ( (__HAVE_AGP && (dma->flags & _DRM_DMA_USE_AGP)) ||
+ (__HAVE_SG && (dma->flags & _DRM_DMA_USE_SG)) ) {
+ drm_map_t *map = DRIVER_AGP_BUFFERS_MAP( dev );
+
+ if ( !map ) {
+ retcode = EINVAL;
+ goto done;
+ }
+
+ virtual = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
+ retcode = vm_mmap(&vms->vm_map,
+ &virtual,
+ round_page(map->size),
+ PROT_READ|PROT_WRITE, VM_PROT_ALL,
+ MAP_SHARED,
+ SLIST_FIRST(&kdev->si_hlist),
+ (unsigned long)map->offset );
+ } else {
+ virtual = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
+ retcode = vm_mmap(&vms->vm_map,
+ &virtual,
+ round_page(dma->byte_count),
+ PROT_READ|PROT_WRITE, VM_PROT_ALL,
+ MAP_SHARED,
+ SLIST_FIRST(&kdev->si_hlist),
+ 0);
+ }
+ if (retcode)
+ goto done;
+ request.virtual = (void *)virtual;
+
+ for ( i = 0 ; i < dma->buf_count ; i++ ) {
+ if ( DRM_OS_COPYTOUSR( &request.list[i].idx,
+ &dma->buflist[i]->idx,
+ sizeof(request.list[0].idx) ) ) {
+ retcode = EFAULT;
+ goto done;
+ }
+ if ( DRM_OS_COPYTOUSR( &request.list[i].total,
+ &dma->buflist[i]->total,
+ sizeof(request.list[0].total) ) ) {
+ retcode = EFAULT;
+ goto done;
+ }
+ if ( DRM_OS_COPYTOUSR( &request.list[i].used,
+ &zero,
+ sizeof(zero) ) ) {
+ retcode = EFAULT;
+ goto done;
+ }
+ address = virtual + dma->buflist[i]->offset; /* *** */
+ if ( DRM_OS_COPYTOUSR( &request.list[i].address,
+ &address,
+ sizeof(address) ) ) {
+ retcode = EFAULT;
+ goto done;
+ }
+ }
+ }
+ done:
+ request.count = dma->buf_count;
+
+ DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );
+
+ DRM_OS_KRNTOUSR( (drm_buf_map_t *)data, request, sizeof(request) );
+
+ DRM_OS_RETURN(retcode);
+}
+
+#endif /* __HAVE_DMA */
+
diff --git a/bsd/drm_context.h b/bsd/drm_context.h
new file mode 100644
index 00000000..8d676a23
--- /dev/null
+++ b/bsd/drm_context.h
@@ -0,0 +1,730 @@
+/* drm_context.h -- IOCTLs for generic contexts -*- linux-c -*-
+ * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+#if __HAVE_CTX_BITMAP
+
+/* ================================================================
+ * Context bitmap support
+ */
+
+void DRM(ctxbitmap_free)( drm_device_t *dev, int ctx_handle )
+{
+ if ( ctx_handle < 0 ) goto failed;
+ if ( !dev->ctx_bitmap ) goto failed;
+
+ if ( ctx_handle < DRM_MAX_CTXBITMAP ) {
+ DRM_OS_LOCK;
+ clear_bit( ctx_handle, dev->ctx_bitmap );
+ dev->context_sareas[ctx_handle] = NULL;
+ DRM_OS_UNLOCK;
+ return;
+ }
+failed:
+ DRM_ERROR( "Attempt to free invalid context handle: %d\n",
+ ctx_handle );
+ return;
+}
+
+int DRM(ctxbitmap_next)( drm_device_t *dev )
+{
+ int bit;
+
+ if(!dev->ctx_bitmap) return -1;
+
+ DRM_OS_LOCK;
+ bit = find_first_zero_bit( dev->ctx_bitmap, DRM_MAX_CTXBITMAP );
+ if ( bit < DRM_MAX_CTXBITMAP ) {
+ set_bit( bit, dev->ctx_bitmap );
+ DRM_DEBUG( "drm_ctxbitmap_next bit : %d\n", bit );
+ if((bit+1) > dev->max_context) {
+ dev->max_context = (bit+1);
+ if(dev->context_sareas) {
+ drm_map_t **ctx_sareas;
+
+ ctx_sareas = DRM(realloc)(dev->context_sareas,
+ (dev->max_context - 1) *
+ sizeof(*dev->context_sareas),
+ dev->max_context *
+ sizeof(*dev->context_sareas),
+ DRM_MEM_MAPS);
+ if(!ctx_sareas) {
+ clear_bit(bit, dev->ctx_bitmap);
+ DRM_OS_UNLOCK;
+ return -1;
+ }
+ dev->context_sareas = ctx_sareas;
+ dev->context_sareas[bit] = NULL;
+ } else {
+ /* max_context == 1 at this point */
+ dev->context_sareas = DRM(alloc)(
+ dev->max_context *
+ sizeof(*dev->context_sareas),
+ DRM_MEM_MAPS);
+ if(!dev->context_sareas) {
+ clear_bit(bit, dev->ctx_bitmap);
+ DRM_OS_UNLOCK;
+ return -1;
+ }
+ dev->context_sareas[bit] = NULL;
+ }
+ }
+ DRM_OS_UNLOCK;
+ return bit;
+ }
+ DRM_OS_UNLOCK;
+ return -1;
+}
+
+int DRM(ctxbitmap_init)( drm_device_t *dev )
+{
+ int i;
+ int temp;
+
+ DRM_OS_LOCK;
+ dev->ctx_bitmap = (unsigned long *) DRM(alloc)( PAGE_SIZE,
+ DRM_MEM_CTXBITMAP );
+ if ( dev->ctx_bitmap == NULL ) {
+ DRM_OS_UNLOCK;
+ DRM_OS_RETURN(ENOMEM);
+ }
+ memset( (void *)dev->ctx_bitmap, 0, PAGE_SIZE );
+ dev->context_sareas = NULL;
+ dev->max_context = -1;
+ DRM_OS_UNLOCK;
+
+ for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
+ temp = DRM(ctxbitmap_next)( dev );
+ DRM_DEBUG( "drm_ctxbitmap_init : %d\n", temp );
+ }
+
+ return 0;
+}
+
+void DRM(ctxbitmap_cleanup)( drm_device_t *dev )
+{
+ DRM_OS_LOCK;
+ if( dev->context_sareas ) DRM(free)( dev->context_sareas,
+ sizeof(*dev->context_sareas) *
+ dev->max_context,
+ DRM_MEM_MAPS );
+ DRM(free)( (void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP );
+ DRM_OS_UNLOCK;
+}
+
+/* ================================================================
+ * Per Context SAREA Support
+ */
+
+int DRM(getsareactx)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_ctx_priv_map_t request;
+ drm_map_t *map;
+
+ DRM_OS_KRNFROMUSR( request, (drm_ctx_priv_map_t *)data,
+ sizeof(request) );
+
+ DRM_OS_LOCK;
+ if (dev->max_context < 0 || request.ctx_id >= (unsigned) dev->max_context) {
+ DRM_OS_UNLOCK;
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ map = dev->context_sareas[request.ctx_id];
+ DRM_OS_UNLOCK;
+
+ request.handle = map->handle;
+
+ DRM_OS_KRNTOUSR( (drm_ctx_priv_map_t *)data, request, sizeof(request) );
+
+ return 0;
+}
+
+int DRM(setsareactx)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_ctx_priv_map_t request;
+ drm_map_t *map = NULL;
+ drm_map_list_entry_t *list;
+
+ DRM_OS_KRNFROMUSR( request, (drm_ctx_priv_map_t *)data,
+ sizeof(request) );
+
+ DRM_OS_LOCK;
+ TAILQ_FOREACH(list, dev->maplist, link) {
+ map=list->map;
+ if(map->handle == request.handle)
+ goto found;
+ }
+
+bad:
+ DRM_OS_UNLOCK;
+	DRM_OS_RETURN(EINVAL);
+
+found:
+ map = list->map;
+ if (!map) goto bad;
+ if (dev->max_context < 0)
+ goto bad;
+ if (request.ctx_id >= (unsigned) dev->max_context)
+ goto bad;
+ dev->context_sareas[request.ctx_id] = map;
+ DRM_OS_UNLOCK;
+ return 0;
+}
+
+/* ================================================================
+ * The actual DRM context handling routines
+ */
+
+int DRM(context_switch)( drm_device_t *dev, int old, int new )
+{
+ char buf[64];
+
+ if ( test_and_set_bit( 0, &dev->context_flag ) ) {
+ DRM_ERROR( "Reentering -- FIXME\n" );
+ DRM_OS_RETURN(EBUSY);
+ }
+
+#if __HAVE_DMA_HISTOGRAM
+ dev->ctx_start = get_cycles();
+#endif
+
+ DRM_DEBUG( "Context switch from %d to %d\n", old, new );
+
+ if ( new == dev->last_context ) {
+ clear_bit( 0, &dev->context_flag );
+ return 0;
+ }
+
+ if ( DRM(flags) & DRM_FLAG_NOCTX ) {
+ DRM(context_switch_complete)( dev, new );
+ } else {
+ sprintf( buf, "C %d %d\n", old, new );
+ DRM(write_string)( dev, buf );
+ }
+
+ return 0;
+}
+
+int DRM(context_switch_complete)( drm_device_t *dev, int new )
+{
+ dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
+ dev->last_switch = jiffies;
+
+ if ( !_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ) {
+ DRM_ERROR( "Lock isn't held after context switch\n" );
+ }
+
+ /* If a context switch is ever initiated
+ when the kernel holds the lock, release
+ that lock here. */
+#if __HAVE_DMA_HISTOGRAM
+ atomic_inc( &dev->histo.ctx[DRM(histogram_slot)(get_cycles()
+ - dev->ctx_start)] );
+
+#endif
+ clear_bit( 0, &dev->context_flag );
+ DRM_OS_WAKEUP( &dev->context_wait );
+
+ return 0;
+}
+
+int DRM(resctx)( DRM_OS_IOCTL )
+{
+ drm_ctx_res_t res;
+ drm_ctx_t ctx;
+ int i;
+
+ DRM_OS_KRNFROMUSR( res, (drm_ctx_res_t *)data, sizeof(res) );
+
+ if ( res.count >= DRM_RESERVED_CONTEXTS ) {
+ memset( &ctx, 0, sizeof(ctx) );
+ for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
+ ctx.handle = i;
+ if ( DRM_OS_COPYTOUSR( &res.contexts[i],
+ &i, sizeof(i) ) )
+ DRM_OS_RETURN(EFAULT);
+ }
+ }
+ res.count = DRM_RESERVED_CONTEXTS;
+
+ DRM_OS_KRNTOUSR( (drm_ctx_res_t *)data, res, sizeof(res) );
+
+ return 0;
+}
+
+int DRM(addctx)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_ctx_t ctx;
+
+ DRM_OS_KRNFROMUSR( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+
+ ctx.handle = DRM(ctxbitmap_next)( dev );
+ if ( ctx.handle == DRM_KERNEL_CONTEXT ) {
+ /* Skip kernel's context and get a new one. */
+ ctx.handle = DRM(ctxbitmap_next)( dev );
+ }
+ DRM_DEBUG( "%d\n", ctx.handle );
+ if ( ctx.handle == -1 ) {
+ DRM_DEBUG( "Not enough free contexts.\n" );
+ /* Should this return -EBUSY instead? */
+ DRM_OS_RETURN(ENOMEM);
+ }
+
+ DRM_OS_KRNTOUSR( (drm_ctx_t *)data, ctx, sizeof(ctx) );
+
+ return 0;
+}
+
+int DRM(modctx)( DRM_OS_IOCTL )
+{
+ /* This does nothing */
+ return 0;
+}
+
+int DRM(getctx)( DRM_OS_IOCTL )
+{
+ drm_ctx_t ctx;
+
+ DRM_OS_KRNFROMUSR( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+
+ /* This is 0, because we don't handle any context flags */
+ ctx.flags = 0;
+
+ DRM_OS_KRNTOUSR( (drm_ctx_t *)data, ctx, sizeof(ctx) );
+
+ return 0;
+}
+
+int DRM(switchctx)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_ctx_t ctx;
+
+ DRM_OS_KRNFROMUSR( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+
+ DRM_DEBUG( "%d\n", ctx.handle );
+ return DRM(context_switch)( dev, dev->last_context, ctx.handle );
+}
+
+int DRM(newctx)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_ctx_t ctx;
+
+ DRM_OS_KRNFROMUSR( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+
+ DRM_DEBUG( "%d\n", ctx.handle );
+ DRM(context_switch_complete)( dev, ctx.handle );
+
+ return 0;
+}
+
+int DRM(rmctx)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_ctx_t ctx;
+
+ DRM_OS_KRNFROMUSR( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+
+ DRM_DEBUG( "%d\n", ctx.handle );
+ if ( ctx.handle != DRM_KERNEL_CONTEXT ) {
+ DRM(ctxbitmap_free)( dev, ctx.handle );
+ }
+
+ return 0;
+}
+
+
+#else /* __HAVE_CTX_BITMAP */
+
+/* ================================================================
+ * Old-style context support
+ */
+
+
+int DRM(context_switch)(drm_device_t *dev, int old, int new)
+{
+ char buf[64];
+ drm_queue_t *q;
+
+#if 0
+ atomic_inc(&dev->total_ctx);
+#endif
+
+ if (test_and_set_bit(0, &dev->context_flag)) {
+ DRM_ERROR("Reentering -- FIXME\n");
+ DRM_OS_RETURN(EBUSY);
+ }
+
+#if __HAVE_DMA_HISTOGRAM
+ dev->ctx_start = get_cycles();
+#endif
+
+ DRM_DEBUG("Context switch from %d to %d\n", old, new);
+
+ if (new >= dev->queue_count) {
+ clear_bit(0, &dev->context_flag);
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ if (new == dev->last_context) {
+ clear_bit(0, &dev->context_flag);
+ return 0;
+ }
+
+ q = dev->queuelist[new];
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->use_count) == 1) {
+ atomic_dec(&q->use_count);
+ clear_bit(0, &dev->context_flag);
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ if (DRM(flags) & DRM_FLAG_NOCTX) {
+ DRM(context_switch_complete)(dev, new);
+ } else {
+ sprintf(buf, "C %d %d\n", old, new);
+ DRM(write_string)(dev, buf);
+ }
+
+ atomic_dec(&q->use_count);
+
+ return 0;
+}
+
+int DRM(context_switch_complete)(drm_device_t *dev, int new)
+{
+ drm_device_dma_t *dma = dev->dma;
+
+ dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
+ dev->last_switch = jiffies;
+
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("Lock isn't held after context switch\n");
+ }
+
+ if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
+ if (DRM(lock_free)(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ DRM_ERROR("Cannot free lock\n");
+ }
+ }
+
+#if __HAVE_DMA_HISTOGRAM
+ atomic_inc(&dev->histo.ctx[DRM(histogram_slot)(get_cycles()
+ - dev->ctx_start)]);
+
+#endif
+ clear_bit(0, &dev->context_flag);
+ DRM_OS_WAKEUP_INT(&dev->context_wait);
+
+ return 0;
+}
+
+static int DRM(init_queue)(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx)
+{
+ DRM_DEBUG("\n");
+
+ if (atomic_read(&q->use_count) != 1
+ || atomic_read(&q->finalization)
+ || atomic_read(&q->block_count)) {
+ DRM_ERROR("New queue is already in use: u%ld f%ld b%ld\n",
+ (unsigned long)atomic_read(&q->use_count),
+ (unsigned long)atomic_read(&q->finalization),
+ (unsigned long)atomic_read(&q->block_count));
+ }
+
+ atomic_set(&q->finalization, 0);
+ atomic_set(&q->block_count, 0);
+ atomic_set(&q->block_read, 0);
+ atomic_set(&q->block_write, 0);
+ atomic_set(&q->total_queued, 0);
+ atomic_set(&q->total_flushed, 0);
+ atomic_set(&q->total_locks, 0);
+
+ q->write_queue = 0;
+ q->read_queue = 0;
+ q->flush_queue = 0;
+
+ q->flags = ctx->flags;
+
+ DRM(waitlist_create)(&q->waitlist, dev->dma->buf_count);
+
+ return 0;
+}
+
+
+/* drm_alloc_queue:
+PRE: 1) dev->queuelist[0..dev->queue_count] is allocated and will not
+ disappear (so all deallocation must be done after IOCTLs are off)
+ 2) dev->queue_count < dev->queue_slots
+ 3) dev->queuelist[i].use_count == 0 and
+ dev->queuelist[i].finalization == 0 if i not in use
+POST: 1) dev->queuelist[i].use_count == 1
+ 2) dev->queue_count < dev->queue_slots */
+
+static int DRM(alloc_queue)(drm_device_t *dev)
+{
+ int i;
+ drm_queue_t *queue;
+ int oldslots;
+ int newslots;
+ /* Check for a free queue */
+ for (i = 0; i < dev->queue_count; i++) {
+ atomic_inc(&dev->queuelist[i]->use_count);
+ if (atomic_read(&dev->queuelist[i]->use_count) == 1
+ && !atomic_read(&dev->queuelist[i]->finalization)) {
+ DRM_DEBUG("%d (free)\n", i);
+ return i;
+ }
+ atomic_dec(&dev->queuelist[i]->use_count);
+ }
+ /* Allocate a new queue */
+ DRM_OS_LOCK;
+
+	queue = DRM(alloc)(sizeof(*queue), DRM_MEM_QUEUES);
+	if (!queue) {
+		DRM_OS_UNLOCK;
+		DRM_DEBUG("out of memory\n");
+		DRM_OS_RETURN(ENOMEM);
+	}
+	memset(queue, 0, sizeof(*queue));
+	atomic_set(&queue->use_count, 1);
+
+ ++dev->queue_count;
+ if (dev->queue_count >= dev->queue_slots) {
+ oldslots = dev->queue_slots * sizeof(*dev->queuelist);
+ if (!dev->queue_slots) dev->queue_slots = 1;
+ dev->queue_slots *= 2;
+ newslots = dev->queue_slots * sizeof(*dev->queuelist);
+
+ dev->queuelist = DRM(realloc)(dev->queuelist,
+ oldslots,
+ newslots,
+ DRM_MEM_QUEUES);
+ if (!dev->queuelist) {
+ DRM_OS_UNLOCK;
+ DRM_DEBUG("out of memory\n");
+ DRM_OS_RETURN(ENOMEM);
+ }
+ }
+ dev->queuelist[dev->queue_count-1] = queue;
+
+ DRM_OS_UNLOCK;
+ DRM_DEBUG("%d (new)\n", dev->queue_count - 1);
+ return dev->queue_count - 1;
+}
+
+int DRM(resctx)( DRM_OS_IOCTL )
+{
+ drm_ctx_res_t res;
+ drm_ctx_t ctx;
+ int i;
+
+ DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
+
+ DRM_OS_KRNFROMUSR( res, (drm_ctx_res_t *)data, sizeof(res) );
+
+ if (res.count >= DRM_RESERVED_CONTEXTS) {
+ memset(&ctx, 0, sizeof(ctx));
+ for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
+ ctx.handle = i;
+ if (DRM_OS_COPYTOUSR(&res.contexts[i],
+ &i,
+ sizeof(i)))
+ DRM_OS_RETURN(EFAULT);
+ }
+ }
+ res.count = DRM_RESERVED_CONTEXTS;
+
+ DRM_OS_KRNTOUSR( (drm_ctx_res_t *)data, res, sizeof(res) );
+
+ return 0;
+}
+
+int DRM(addctx)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_ctx_t ctx;
+
+ DRM_OS_KRNFROMUSR( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+
+ if ((ctx.handle = DRM(alloc_queue)(dev)) == DRM_KERNEL_CONTEXT) {
+ /* Init kernel's context and get a new one. */
+ DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
+ ctx.handle = DRM(alloc_queue)(dev);
+ }
+ DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
+ DRM_DEBUG("%d\n", ctx.handle);
+
+ DRM_OS_KRNTOUSR( (drm_ctx_t *)data, ctx, sizeof(ctx) );
+
+ return 0;
+}
+
+int DRM(modctx)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_ctx_t ctx;
+ drm_queue_t *q;
+
+ DRM_OS_KRNFROMUSR( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+
+ DRM_DEBUG("%d\n", ctx.handle);
+
+ if (ctx.handle < 0 || ctx.handle >= dev->queue_count)
+ DRM_OS_RETURN(EINVAL);
+ q = dev->queuelist[ctx.handle];
+
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->use_count) == 1) {
+ /* No longer in use */
+ atomic_dec(&q->use_count);
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ if (DRM_BUFCOUNT(&q->waitlist)) {
+ atomic_dec(&q->use_count);
+ DRM_OS_RETURN(EBUSY);
+ }
+
+ q->flags = ctx.flags;
+
+ atomic_dec(&q->use_count);
+ return 0;
+}
+
+int DRM(getctx)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_ctx_t ctx;
+ drm_queue_t *q;
+
+ DRM_OS_KRNFROMUSR( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+
+ DRM_DEBUG("%d\n", ctx.handle);
+
+ if (ctx.handle >= dev->queue_count)
+ DRM_OS_RETURN(EINVAL);
+ q = dev->queuelist[ctx.handle];
+
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->use_count) == 1) {
+ /* No longer in use */
+ atomic_dec(&q->use_count);
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ ctx.flags = q->flags;
+ atomic_dec(&q->use_count);
+
+ DRM_OS_KRNTOUSR( (drm_ctx_t *)data, ctx, sizeof(ctx) );
+
+ return 0;
+}
+
+int DRM(switchctx)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_ctx_t ctx;
+
+ DRM_OS_KRNFROMUSR( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+
+ DRM_DEBUG("%d\n", ctx.handle);
+ return DRM(context_switch)(dev, dev->last_context, ctx.handle);
+}
+
+int DRM(newctx)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_ctx_t ctx;
+
+ DRM_OS_KRNFROMUSR( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+
+ DRM_DEBUG("%d\n", ctx.handle);
+ DRM(context_switch_complete)(dev, ctx.handle);
+
+ return 0;
+}
+
+int DRM(rmctx)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_ctx_t ctx;
+ drm_queue_t *q;
+ drm_buf_t *buf;
+
+ DRM_OS_KRNFROMUSR( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+
+ DRM_DEBUG("%d\n", ctx.handle);
+
+ if (ctx.handle >= dev->queue_count) DRM_OS_RETURN(EINVAL);
+ q = dev->queuelist[ctx.handle];
+
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->use_count) == 1) {
+ /* No longer in use */
+ atomic_dec(&q->use_count);
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ atomic_inc(&q->finalization); /* Mark queue in finalization state */
+ atomic_sub(2, &q->use_count); /* Mark queue as unused (pending
+ finalization) */
+
+ while (test_and_set_bit(0, &dev->interrupt_flag)) {
+ static int never;
+ int retcode;
+ retcode = tsleep(&never, PZERO|PCATCH, "never", 1);
+ if (retcode)
+ return retcode;
+ }
+ /* Remove queued buffers */
+ while ((buf = DRM(waitlist_get)(&q->waitlist))) {
+ DRM(free_buffer)(dev, buf);
+ }
+ clear_bit(0, &dev->interrupt_flag);
+
+ /* Wakeup blocked processes */
+ wakeup( &q->block_read );
+ wakeup( &q->block_write );
+ DRM_OS_WAKEUP_INT( &q->flush_queue );
+ /* Finalization over. Queue is made
+ available when both use_count and
+ finalization become 0, which won't
+ happen until all the waiting processes
+ stop waiting. */
+ atomic_dec(&q->finalization);
+ return 0;
+}
+
+#endif /* __HAVE_CTX_BITMAP */
diff --git a/bsd/drm_dma.h b/bsd/drm_dma.h
new file mode 100644
index 00000000..e5aef241
--- /dev/null
+++ b/bsd/drm_dma.h
@@ -0,0 +1,605 @@
+/* drm_dma.c -- DMA IOCTL and function support -*- linux-c -*-
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/rman.h>
+
+#include "drmP.h"
+
+#ifndef __HAVE_DMA_WAITQUEUE
+#define __HAVE_DMA_WAITQUEUE 0
+#endif
+#ifndef __HAVE_DMA_RECLAIM
+#define __HAVE_DMA_RECLAIM 0
+#endif
+#ifndef __HAVE_SHARED_IRQ
+#define __HAVE_SHARED_IRQ 0
+#endif
+
+#if __HAVE_SHARED_IRQ
+#define DRM_IRQ_TYPE SA_SHIRQ
+#else
+#define DRM_IRQ_TYPE 0
+#endif
+
+#if __HAVE_DMA
+
+int DRM(dma_setup)( drm_device_t *dev )
+{
+ int i;
+
+ dev->dma = DRM(alloc)( sizeof(*dev->dma), DRM_MEM_DRIVER );
+ if ( !dev->dma )
+ DRM_OS_RETURN(ENOMEM);
+
+ memset( dev->dma, 0, sizeof(*dev->dma) );
+
+ for ( i = 0 ; i <= DRM_MAX_ORDER ; i++ )
+ memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
+
+ return 0;
+}
+
+void DRM(dma_takedown)(drm_device_t *dev)
+{
+ drm_device_dma_t *dma = dev->dma;
+ int i, j;
+
+ if (!dma) return;
+
+ /* Clear dma buffers */
+ for (i = 0; i <= DRM_MAX_ORDER; i++) {
+ if (dma->bufs[i].seg_count) {
+ DRM_DEBUG("order %d: buf_count = %d,"
+ " seg_count = %d\n",
+ i,
+ dma->bufs[i].buf_count,
+ dma->bufs[i].seg_count);
+ for (j = 0; j < dma->bufs[i].seg_count; j++) {
+ DRM(free_pages)(dma->bufs[i].seglist[j],
+ dma->bufs[i].page_order,
+ DRM_MEM_DMA);
+ }
+ DRM(free)(dma->bufs[i].seglist,
+ dma->bufs[i].seg_count
+ * sizeof(*dma->bufs[0].seglist),
+ DRM_MEM_SEGS);
+ }
+ if(dma->bufs[i].buf_count) {
+ for(j = 0; j < dma->bufs[i].buf_count; j++) {
+ if(dma->bufs[i].buflist[j].dev_private) {
+ DRM(free)(dma->bufs[i].buflist[j].dev_private,
+ dma->bufs[i].buflist[j].dev_priv_size,
+ DRM_MEM_BUFS);
+ }
+ }
+ DRM(free)(dma->bufs[i].buflist,
+ dma->bufs[i].buf_count *
+ sizeof(*dma->bufs[0].buflist),
+ DRM_MEM_BUFS);
+#if __HAVE_DMA_FREELIST
+ DRM(freelist_destroy)(&dma->bufs[i].freelist);
+#endif
+ }
+ }
+
+ if (dma->buflist) {
+ DRM(free)(dma->buflist,
+ dma->buf_count * sizeof(*dma->buflist),
+ DRM_MEM_BUFS);
+ }
+
+ if (dma->pagelist) {
+ DRM(free)(dma->pagelist,
+ dma->page_count * sizeof(*dma->pagelist),
+ DRM_MEM_PAGES);
+ }
+ DRM(free)(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
+ dev->dma = NULL;
+}
+
+
+#if __HAVE_DMA_HISTOGRAM
+/* This is slow, but is useful for debugging. */
+int DRM(histogram_slot)(unsigned long count)
+{
+ int value = DRM_DMA_HISTOGRAM_INITIAL;
+ int slot;
+
+ for (slot = 0;
+ slot < DRM_DMA_HISTOGRAM_SLOTS;
+ ++slot, value = DRM_DMA_HISTOGRAM_NEXT(value)) {
+ if (count < value) return slot;
+ }
+ return DRM_DMA_HISTOGRAM_SLOTS - 1;
+}
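+
+/* Informal illustration: slot boundaries start at DRM_DMA_HISTOGRAM_INITIAL
+ * cycles and widen by DRM_DMA_HISTOGRAM_NEXT() per slot, so a count below
+ * the first boundary maps to slot 0 and anything past the last boundary is
+ * clamped to slot DRM_DMA_HISTOGRAM_SLOTS - 1.
+ */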
+
+void DRM(histogram_compute)(drm_device_t *dev, drm_buf_t *buf)
+{
+ cycles_t queued_to_dispatched;
+ cycles_t dispatched_to_completed;
+ cycles_t completed_to_freed;
+ int q2d, d2c, c2f, q2c, q2f;
+
+ if (buf->time_queued) {
+ queued_to_dispatched = (buf->time_dispatched
+ - buf->time_queued);
+ dispatched_to_completed = (buf->time_completed
+ - buf->time_dispatched);
+ completed_to_freed = (buf->time_freed
+ - buf->time_completed);
+
+ q2d = DRM(histogram_slot)(queued_to_dispatched);
+ d2c = DRM(histogram_slot)(dispatched_to_completed);
+ c2f = DRM(histogram_slot)(completed_to_freed);
+
+ q2c = DRM(histogram_slot)(queued_to_dispatched
+ + dispatched_to_completed);
+ q2f = DRM(histogram_slot)(queued_to_dispatched
+ + dispatched_to_completed
+ + completed_to_freed);
+
+ atomic_inc(&dev->histo.total);
+ atomic_inc(&dev->histo.queued_to_dispatched[q2d]);
+ atomic_inc(&dev->histo.dispatched_to_completed[d2c]);
+ atomic_inc(&dev->histo.completed_to_freed[c2f]);
+
+ atomic_inc(&dev->histo.queued_to_completed[q2c]);
+ atomic_inc(&dev->histo.queued_to_freed[q2f]);
+
+ }
+ buf->time_queued = 0;
+ buf->time_dispatched = 0;
+ buf->time_completed = 0;
+ buf->time_freed = 0;
+}
+#endif
+
+void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf)
+{
+ if (!buf) return;
+
+ buf->waiting = 0;
+ buf->pending = 0;
+ buf->pid = 0;
+ buf->used = 0;
+#if __HAVE_DMA_HISTOGRAM
+ buf->time_completed = get_cycles();
+#endif
+
+ if ( buf->dma_wait ) {
+ wakeup( &buf->dma_wait );
+ buf->dma_wait = 0;
+ }
+#if __HAVE_DMA_FREELIST
+ else {
+ drm_device_dma_t *dma = dev->dma;
+ /* If processes are waiting, the last one
+ to wake will put the buffer on the free
+ list. If no processes are waiting, we
+ put the buffer on the freelist here. */
+ DRM(freelist_put)(dev, &dma->bufs[buf->order].freelist, buf);
+ }
+#endif
+}
+
+#if !__HAVE_DMA_RECLAIM
+void DRM(reclaim_buffers)(drm_device_t *dev, pid_t pid)
+{
+ drm_device_dma_t *dma = dev->dma;
+ int i;
+
+ if (!dma) return;
+ for (i = 0; i < dma->buf_count; i++) {
+ if (dma->buflist[i]->pid == pid) {
+ switch (dma->buflist[i]->list) {
+ case DRM_LIST_NONE:
+ DRM(free_buffer)(dev, dma->buflist[i]);
+ break;
+ case DRM_LIST_WAIT:
+ dma->buflist[i]->list = DRM_LIST_RECLAIM;
+ break;
+ default:
+ /* Buffer already on hardware. */
+ break;
+ }
+ }
+ }
+}
+#endif
+
+
+/* GH: This is a big hack for now...
+ */
+#if __HAVE_OLD_DMA
+
+void DRM(clear_next_buffer)(drm_device_t *dev)
+{
+ drm_device_dma_t *dma = dev->dma;
+
+ dma->next_buffer = NULL;
+ if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
+ DRM_OS_WAKEUP_INT(&dma->next_queue->flush_queue);
+ }
+ dma->next_queue = NULL;
+}
+
+int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long))
+{
+ int i;
+ int candidate = -1;
+ int j = jiffies;
+
+ if (!dev) {
+ DRM_ERROR("No device\n");
+ return -1;
+ }
+ if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
+ /* This only happens between the time the
+ interrupt is initialized and the time
+ the queues are initialized. */
+ return -1;
+ }
+
+ /* Doing "while locked" DMA? */
+ if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
+ return DRM_KERNEL_CONTEXT;
+ }
+
+ /* If there are buffers on the last_context
+ queue, and we have not been executing
+ this context very long, continue to
+ execute this context. */
+ if (dev->last_switch <= j
+ && dev->last_switch + DRM_TIME_SLICE > j
+ && DRM_WAITCOUNT(dev, dev->last_context)) {
+ return dev->last_context;
+ }
+
+ /* Otherwise, find a candidate */
+ for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
+ if (DRM_WAITCOUNT(dev, i)) {
+ candidate = dev->last_checked = i;
+ break;
+ }
+ }
+
+ if (candidate < 0) {
+ for (i = 0; i < dev->queue_count; i++) {
+ if (DRM_WAITCOUNT(dev, i)) {
+ candidate = dev->last_checked = i;
+ break;
+ }
+ }
+ }
+
+ if (wrapper
+ && candidate >= 0
+ && candidate != dev->last_context
+ && dev->last_switch <= j
+ && dev->last_switch + DRM_TIME_SLICE > j) {
+ int s = splclock();
+ if (dev->timer.c_time != dev->last_switch + DRM_TIME_SLICE) {
+ callout_reset(&dev->timer,
+ dev->last_switch + DRM_TIME_SLICE - j,
+ (void (*)(void *))wrapper,
+ dev);
+ }
+ splx(s);
+ return -1;
+ }
+
+ return candidate;
+}
+
+
+int DRM(dma_enqueue)(drm_device_t *dev, drm_dma_t *d)
+{
+ int i;
+ drm_queue_t *q;
+ drm_buf_t *buf;
+ int idx;
+ int while_locked = 0;
+ drm_device_dma_t *dma = dev->dma;
+ int error;
+
+ DRM_DEBUG("%d\n", d->send_count);
+
+ if (d->flags & _DRM_DMA_WHILE_LOCKED) {
+ int context = dev->lock.hw_lock->lock;
+
+ if (!_DRM_LOCK_IS_HELD(context)) {
+ DRM_ERROR("No lock held during \"while locked\""
+ " request\n");
+ DRM_OS_RETURN(EINVAL);
+ }
+ if (d->context != _DRM_LOCKING_CONTEXT(context)
+ && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Lock held by %d while %d makes"
+ " \"while locked\" request\n",
+ _DRM_LOCKING_CONTEXT(context),
+ d->context);
+ DRM_OS_RETURN(EINVAL);
+ }
+ q = dev->queuelist[DRM_KERNEL_CONTEXT];
+ while_locked = 1;
+ } else {
+ q = dev->queuelist[d->context];
+ }
+
+
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->block_write)) {
+ atomic_inc(&q->block_count);
+ for (;;) {
+ if (!atomic_read(&q->block_write)) break;
+ error = tsleep(&q->block_write, PZERO|PCATCH,
+ "dmawr", 0);
+ if (error) {
+ atomic_dec(&q->use_count);
+ return error;
+ }
+ }
+ atomic_dec(&q->block_count);
+ }
+
+ for (i = 0; i < d->send_count; i++) {
+ idx = d->send_indices[i];
+ if (idx < 0 || idx >= dma->buf_count) {
+ atomic_dec(&q->use_count);
+ DRM_ERROR("Index %d (of %d max)\n",
+ d->send_indices[i], dma->buf_count - 1);
+ DRM_OS_RETURN(EINVAL);
+ }
+ buf = dma->buflist[ idx ];
+ if (buf->pid != DRM_OS_CURRENTPID) {
+ atomic_dec(&q->use_count);
+ DRM_ERROR("Process %d using buffer owned by %d\n",
+ DRM_OS_CURRENTPID, buf->pid);
+ DRM_OS_RETURN(EINVAL);
+ }
+ if (buf->list != DRM_LIST_NONE) {
+ atomic_dec(&q->use_count);
+ DRM_ERROR("Process %d using buffer %d on list %d\n",
+ DRM_OS_CURRENTPID, buf->idx, buf->list);
+ }
+ buf->used = d->send_sizes[i];
+ buf->while_locked = while_locked;
+ buf->context = d->context;
+ if (!buf->used) {
+ DRM_ERROR("Queueing 0 length buffer\n");
+ }
+ if (buf->pending) {
+ atomic_dec(&q->use_count);
+ DRM_ERROR("Queueing pending buffer:"
+ " buffer %d, offset %d\n",
+ d->send_indices[i], i);
+ DRM_OS_RETURN(EINVAL);
+ }
+ if (buf->waiting) {
+ atomic_dec(&q->use_count);
+ DRM_ERROR("Queueing waiting buffer:"
+ " buffer %d, offset %d\n",
+ d->send_indices[i], i);
+ DRM_OS_RETURN(EINVAL);
+ }
+ buf->waiting = 1;
+ if (atomic_read(&q->use_count) == 1
+ || atomic_read(&q->finalization)) {
+ DRM(free_buffer)(dev, buf);
+ } else {
+ DRM(waitlist_put)(&q->waitlist, buf);
+ atomic_inc(&q->total_queued);
+ }
+ }
+ atomic_dec(&q->use_count);
+
+ return 0;
+}
+
+static int DRM(dma_get_buffers_of_order)(drm_device_t *dev, drm_dma_t *d,
+ int order)
+{
+ int i;
+ drm_buf_t *buf;
+ drm_device_dma_t *dma = dev->dma;
+
+ for (i = d->granted_count; i < d->request_count; i++) {
+ buf = DRM(freelist_get)(&dma->bufs[order].freelist,
+ d->flags & _DRM_DMA_WAIT);
+ if (!buf) break;
+ if (buf->pending || buf->waiting) {
+ DRM_ERROR("Free buffer %d in use by %d (w%d, p%d)\n",
+ buf->idx,
+ buf->pid,
+ buf->waiting,
+ buf->pending);
+ }
+ buf->pid = DRM_OS_CURRENTPID;
+ if (DRM_OS_COPYTOUSR(&d->request_indices[i],
+ &buf->idx,
+ sizeof(buf->idx)))
+ DRM_OS_RETURN(EFAULT);
+
+ if (DRM_OS_COPYTOUSR(&d->request_sizes[i],
+ &buf->total,
+ sizeof(buf->total)))
+ DRM_OS_RETURN(EFAULT);
+
+ ++d->granted_count;
+ }
+ return 0;
+}
+
+
+int DRM(dma_get_buffers)(drm_device_t *dev, drm_dma_t *dma)
+{
+ int order;
+ int retcode = 0;
+ int tmp_order;
+
+ order = DRM(order)(dma->request_size);
+
+ dma->granted_count = 0;
+ retcode = DRM(dma_get_buffers_of_order)(dev, dma, order);
+
+ if (dma->granted_count < dma->request_count
+ && (dma->flags & _DRM_DMA_SMALLER_OK)) {
+ for (tmp_order = order - 1;
+ !retcode
+ && dma->granted_count < dma->request_count
+ && tmp_order >= DRM_MIN_ORDER;
+ --tmp_order) {
+
+ retcode = DRM(dma_get_buffers_of_order)(dev, dma,
+ tmp_order);
+ }
+ }
+
+ if (dma->granted_count < dma->request_count
+ && (dma->flags & _DRM_DMA_LARGER_OK)) {
+ for (tmp_order = order + 1;
+ !retcode
+ && dma->granted_count < dma->request_count
+ && tmp_order <= DRM_MAX_ORDER;
+ ++tmp_order) {
+
+ retcode = DRM(dma_get_buffers_of_order)(dev, dma,
+ tmp_order);
+ }
+ }
+ return 0;
+}
+
+#endif /* __HAVE_OLD_DMA */
+
+
+#if __HAVE_DMA_IRQ
+
+int DRM(irq_install)( drm_device_t *dev, int irq )
+{
+ int rid;
+ int retcode;
+
+ if ( !irq )
+ DRM_OS_RETURN(EINVAL);
+
+ DRM_OS_LOCK;
+ if ( dev->irq ) {
+ DRM_OS_UNLOCK;
+ DRM_OS_RETURN(EBUSY);
+ }
+ dev->irq = irq;
+ DRM_OS_UNLOCK;
+
+ DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, irq );
+
+ dev->context_flag = 0;
+ dev->interrupt_flag = 0;
+ dev->dma_flag = 0;
+
+ dev->dma->next_buffer = NULL;
+ dev->dma->next_queue = NULL;
+ dev->dma->this_buffer = NULL;
+
+#if __HAVE_DMA_IRQ_BH
+ TASK_INIT(&dev->task, 0, DRM(dma_immediate_bh), dev);
+#endif
+
+ /* Before installing handler */
+ DRIVER_PREINSTALL();
+
+ /* Install handler */
+ rid = 0;
+ dev->irqr = bus_alloc_resource(dev->device, SYS_RES_IRQ, &rid,
+ 0, ~0, 1, RF_SHAREABLE);
+ if (!dev->irqr)
+ return ENOENT;
+
+ retcode = bus_setup_intr(dev->device, dev->irqr, INTR_TYPE_TTY,
+ DRM(dma_service), dev, &dev->irqh);
+ if ( retcode ) {
+ DRM_OS_LOCK;
+ bus_release_resource(dev->device, SYS_RES_IRQ, 0, dev->irqr);
+ dev->irq = 0;
+ DRM_OS_UNLOCK;
+ return retcode;
+ }
+
+ /* After installing handler */
+ DRIVER_POSTINSTALL();
+
+ return 0;
+}
+
+int DRM(irq_uninstall)( drm_device_t *dev )
+{
+ int irq;
+
+ DRM_OS_LOCK;
+ irq = dev->irq;
+ dev->irq = 0;
+ DRM_OS_UNLOCK;
+
+ if ( !irq )
+ DRM_OS_RETURN(EINVAL);
+
+ DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, irq );
+
+ DRIVER_UNINSTALL();
+
+ bus_teardown_intr(dev->device, dev->irqr, dev->irqh);
+ bus_release_resource(dev->device, SYS_RES_IRQ, 0, dev->irqr);
+
+ return 0;
+}
+
+int DRM(control)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_control_t ctl;
+
+ DRM_OS_KRNFROMUSR( ctl, (drm_control_t *) data, sizeof(ctl) );
+
+ switch ( ctl.func ) {
+ case DRM_INST_HANDLER:
+ return DRM(irq_install)( dev, ctl.irq );
+ case DRM_UNINST_HANDLER:
+ return DRM(irq_uninstall)( dev );
+ default:
+ DRM_OS_RETURN(EINVAL);
+ }
+}
+
+#endif /* __HAVE_DMA_IRQ */
+
+#endif /* __HAVE_DMA */
diff --git a/bsd/drm_drawable.h b/bsd/drm_drawable.h
new file mode 100644
index 00000000..f57d8628
--- /dev/null
+++ b/bsd/drm_drawable.h
@@ -0,0 +1,50 @@
+/* drm_drawable.h -- IOCTLs for drawables -*- linux-c -*-
+ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+int DRM(adddraw)( DRM_OS_IOCTL )
+{
+ drm_draw_t draw;
+
+ draw.handle = 0; /* NOOP */
+ DRM_DEBUG("%d\n", draw.handle);
+
+ DRM_OS_KRNTOUSR( (drm_draw_t *)data, draw, sizeof(draw) );
+
+ return 0;
+}
+
+int DRM(rmdraw)( DRM_OS_IOCTL )
+{
+ return 0; /* NOOP */
+}
diff --git a/bsd/drm_drv.h b/bsd/drm_drv.h
new file mode 100644
index 00000000..4e5d76fb
--- /dev/null
+++ b/bsd/drm_drv.h
@@ -0,0 +1,1160 @@
+/* drm_drv.h -- Generic driver template -*- linux-c -*-
+ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * To use this template, you must at least define the following (samples
+ * given for the MGA driver):
+ *
+ * #define DRIVER_AUTHOR "VA Linux Systems, Inc."
+ *
+ * #define DRIVER_NAME "mga"
+ * #define DRIVER_DESC "Matrox G200/G400"
+ * #define DRIVER_DATE "20001127"
+ *
+ * #define DRIVER_MAJOR 2
+ * #define DRIVER_MINOR 0
+ * #define DRIVER_PATCHLEVEL 2
+ *
+ * #define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( mga_ioctls )
+ *
+ * #define DRM(x) mga_##x
+ */
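+
+/* A minimal sketch (not taken from any shipping driver) of how a
+ * hypothetical driver source file could satisfy the requirements above
+ * before pulling in this template; every "foo" name below is a placeholder:
+ *
+ *   #define DRIVER_AUTHOR     "Example Author"
+ *   #define DRIVER_NAME       "foo"
+ *   #define DRIVER_DESC       "Example Foo Graphics"
+ *   #define DRIVER_DATE       "20020127"
+ *   #define DRIVER_MAJOR      1
+ *   #define DRIVER_MINOR      0
+ *   #define DRIVER_PATCHLEVEL 0
+ *   #define DRM(x)            foo_##x
+ *
+ *   #include "drm_drv.h"      (and the other drm_*.h templates as needed)
+ *
+ * Any __HAVE_* feature macro the driver leaves undefined simply defaults
+ * to 0 in the block that follows.
+ */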
+
+#ifndef __MUST_HAVE_AGP
+#define __MUST_HAVE_AGP 0
+#endif
+#ifndef __HAVE_CTX_BITMAP
+#define __HAVE_CTX_BITMAP 0
+#endif
+#ifndef __HAVE_DMA_IRQ
+#define __HAVE_DMA_IRQ 0
+#endif
+#ifndef __HAVE_DMA_QUEUE
+#define __HAVE_DMA_QUEUE 0
+#endif
+#ifndef __HAVE_MULTIPLE_DMA_QUEUES
+#define __HAVE_MULTIPLE_DMA_QUEUES 0
+#endif
+#ifndef __HAVE_DMA_SCHEDULE
+#define __HAVE_DMA_SCHEDULE 0
+#endif
+#ifndef __HAVE_DMA_FLUSH
+#define __HAVE_DMA_FLUSH 0
+#endif
+#ifndef __HAVE_DMA_READY
+#define __HAVE_DMA_READY 0
+#endif
+#ifndef __HAVE_DMA_QUIESCENT
+#define __HAVE_DMA_QUIESCENT 0
+#endif
+#ifndef __HAVE_RELEASE
+#define __HAVE_RELEASE 0
+#endif
+#ifndef __HAVE_COUNTERS
+#define __HAVE_COUNTERS 0
+#endif
+#ifndef __HAVE_SG
+#define __HAVE_SG 0
+#endif
+#ifndef __HAVE_KERNEL_CTX_SWITCH
+#define __HAVE_KERNEL_CTX_SWITCH 0
+#endif
+#ifndef PCI_ANY_ID
+#define PCI_ANY_ID ~0
+#endif
+
+#ifndef DRIVER_PREINIT
+#define DRIVER_PREINIT()
+#endif
+#ifndef DRIVER_POSTINIT
+#define DRIVER_POSTINIT()
+#endif
+#ifndef DRIVER_PRERELEASE
+#define DRIVER_PRERELEASE()
+#endif
+#ifndef DRIVER_PRETAKEDOWN
+#define DRIVER_PRETAKEDOWN()
+#endif
+#ifndef DRIVER_POSTCLEANUP
+#define DRIVER_POSTCLEANUP()
+#endif
+#ifndef DRIVER_PRESETUP
+#define DRIVER_PRESETUP()
+#endif
+#ifndef DRIVER_POSTSETUP
+#define DRIVER_POSTSETUP()
+#endif
+#ifndef DRIVER_IOCTLS
+#define DRIVER_IOCTLS
+#endif
+#ifndef DRIVER_FOPS
+#if DRM_LINUX
+#include <sys/file.h>
+#include <sys/proc.h>
+#include <machine/../linux/linux.h>
+#include <machine/../linux/linux_proto.h>
+#include "drm_linux.h"
+#endif
+#endif
+
+
+/*
+ * The default number of instances (minor numbers) to initialize.
+ */
+#ifndef DRIVER_NUM_CARDS
+#define DRIVER_NUM_CARDS 1
+#endif
+
+static int DRM(init)(device_t nbdev);
+static void DRM(cleanup)(device_t nbdev);
+
+#define CDEV_MAJOR 145
+#define DRIVER_SOFTC(unit) \
+ ((drm_device_t *) devclass_get_softc(DRM(devclass), unit))
+
+#if __REALLY_HAVE_AGP
+MODULE_DEPEND(DRIVER_NAME, agp, 1, 1, 1);
+#endif
+#if DRM_LINUX
+MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);
+#endif
+
+static drm_device_t *DRM(device);
+static int *DRM(minor);
+static int DRM(numdevs) = 0;
+
+
+static drm_ioctl_desc_t DRM(ioctls)[] = {
+ [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { DRM(version), 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { DRM(getunique), 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { DRM(getmagic), 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { DRM(irq_busid), 0, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = { DRM(getmap), 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = { DRM(getclient), 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = { DRM(getstats), 0, 0 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { DRM(setunique), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { DRM(block), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { DRM(unblock), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { DRM(authmagic), 1, 1 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { DRM(addmap), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = { DRM(rmmap), 1, 0 },
+
+#if __HAVE_CTX_BITMAP
+ [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { DRM(setsareactx), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { DRM(getsareactx), 1, 0 },
+#endif
+
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { DRM(addctx), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { DRM(rmctx), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { DRM(modctx), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { DRM(getctx), 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { DRM(switchctx), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { DRM(newctx), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { DRM(resctx), 1, 0 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { DRM(adddraw), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { DRM(rmdraw), 1, 1 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { DRM(lock), 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { DRM(unlock), 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(finish), 1, 0 },
+
+#if __HAVE_DMA
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { DRM(addbufs), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { DRM(markbufs), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { DRM(infobufs), 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { DRM(mapbufs), 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { DRM(freebufs), 1, 0 },
+
+ /* The DRM_IOCTL_DMA ioctl should be defined by the driver.
+ */
+#if __HAVE_DMA_IRQ
+ [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { DRM(control), 1, 1 },
+#endif
+#endif
+
+#if __REALLY_HAVE_AGP
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { DRM(agp_acquire), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { DRM(agp_release), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { DRM(agp_enable), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { DRM(agp_info), 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { DRM(agp_alloc), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { DRM(agp_free), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { DRM(agp_bind), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { DRM(agp_unbind), 1, 1 },
+#endif
+
+#if __REALLY_HAVE_SG
+ [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = { DRM(sg_alloc), 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = { DRM(sg_free), 1, 1 },
+#endif
+
+ DRIVER_IOCTLS
+};
+
+#define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( DRM(ioctls) )
+
+
+static int DRM(probe)(device_t dev)
+{
+ const char *s = 0;
+
+ int pciid=pci_get_devid(dev);
+ int vendor = (pciid & 0x0000ffff);
+ int device = (pciid & 0xffff0000) >> 16;
+ int i=0, done=0;
+ DRM_INFO("Checking PCI vendor=%d, device=%d\n", vendor, device);
+ while ( !done && (DRM(devicelist)[i].vendor != 0 ) ) {
+ if ( (DRM(devicelist)[i].vendor == vendor) &&
+ (DRM(devicelist)[i].device == device) ) {
+ done=1;
+ if ( DRM(devicelist)[i].supported )
+ s = DRM(devicelist)[i].name;
+ else
+ DRM_INFO("%s not supported\n", DRM(devicelist)[i].name);
+ }
+ i++;
+ }
+
+ if (s) {
+ device_set_desc(dev, s);
+ return 0;
+ }
+
+ return ENXIO;
+}
+
+static int DRM(attach)(device_t dev)
+{
+ return DRM(init)(dev);
+}
+
+static int DRM(detach)(device_t dev)
+{
+ DRM(cleanup)(dev);
+ return 0;
+}
+
+static device_method_t DRM(methods)[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, DRM( probe)),
+ DEVMETHOD(device_attach, DRM( attach)),
+ DEVMETHOD(device_detach, DRM( detach)),
+
+ { 0, 0 }
+};
+
+static driver_t DRM(driver) = {
+ "drm",
+ DRM(methods),
+ sizeof(drm_device_t),
+};
+
+static devclass_t DRM( devclass);
+
+static struct cdevsw DRM( cdevsw) = {
+ /* open */ DRM( open ),
+ /* close */ DRM( close ),
+ /* read */ DRM( read ),
+ /* write */ DRM( write ),
+ /* ioctl */ DRM( ioctl ),
+ /* poll */ DRM( poll ),
+ /* mmap */ DRM( mmap ),
+ /* strategy */ nostrategy,
+ /* name */ DRIVER_NAME,
+ /* maj */ CDEV_MAJOR,
+ /* dump */ nodump,
+ /* psize */ nopsize,
+ /* flags */ D_TTY | D_TRACKCLOSE,
+#if __FreeBSD_version >= 500000
+ /* kqfilter */ 0
+#else
+ /* bmaj */ -1
+#endif
+};
+
+static int DRM(setup)( drm_device_t *dev )
+{
+ int i;
+
+ DRIVER_PRESETUP();
+ atomic_set( &dev->ioctl_count, 0 );
+ atomic_set( &dev->vma_count, 0 );
+ dev->buf_use = 0;
+ atomic_set( &dev->buf_alloc, 0 );
+
+#if __HAVE_DMA
+ i = DRM(dma_setup)( dev );
+ if ( i < 0 )
+ return i;
+#endif
+
+ dev->counters = 6 + __HAVE_COUNTERS;
+ dev->types[0] = _DRM_STAT_LOCK;
+ dev->types[1] = _DRM_STAT_OPENS;
+ dev->types[2] = _DRM_STAT_CLOSES;
+ dev->types[3] = _DRM_STAT_IOCTLS;
+ dev->types[4] = _DRM_STAT_LOCKS;
+ dev->types[5] = _DRM_STAT_UNLOCKS;
+#ifdef __HAVE_COUNTER6
+ dev->types[6] = __HAVE_COUNTER6;
+#endif
+#ifdef __HAVE_COUNTER7
+ dev->types[7] = __HAVE_COUNTER7;
+#endif
+#ifdef __HAVE_COUNTER8
+ dev->types[8] = __HAVE_COUNTER8;
+#endif
+#ifdef __HAVE_COUNTER9
+ dev->types[9] = __HAVE_COUNTER9;
+#endif
+#ifdef __HAVE_COUNTER10
+ dev->types[10] = __HAVE_COUNTER10;
+#endif
+#ifdef __HAVE_COUNTER11
+ dev->types[11] = __HAVE_COUNTER11;
+#endif
+#ifdef __HAVE_COUNTER12
+ dev->types[12] = __HAVE_COUNTER12;
+#endif
+#ifdef __HAVE_COUNTER13
+ dev->types[13] = __HAVE_COUNTER13;
+#endif
+#ifdef __HAVE_COUNTER14
+ dev->types[14] = __HAVE_COUNTER14;
+#endif
+#ifdef __HAVE_COUNTER15
+	dev->types[15] = __HAVE_COUNTER15;
+#endif
+
+ for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ )
+ atomic_set( &dev->counts[i], 0 );
+
+ for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
+ dev->magiclist[i].head = NULL;
+ dev->magiclist[i].tail = NULL;
+ }
+
+ dev->maplist = DRM(alloc)(sizeof(*dev->maplist),
+ DRM_MEM_MAPS);
+ if(dev->maplist == NULL) DRM_OS_RETURN(ENOMEM);
+ memset(dev->maplist, 0, sizeof(*dev->maplist));
+ TAILQ_INIT(dev->maplist);
+ dev->map_count = 0;
+
+ dev->vmalist = NULL;
+ dev->lock.hw_lock = NULL;
+ dev->lock.lock_queue = 0;
+ dev->queue_count = 0;
+ dev->queue_reserved = 0;
+ dev->queue_slots = 0;
+ dev->queuelist = NULL;
+ dev->irq = 0;
+ dev->context_flag = 0;
+ dev->interrupt_flag = 0;
+ dev->dma_flag = 0;
+ dev->last_context = 0;
+ dev->last_switch = 0;
+ dev->last_checked = 0;
+#if __FreeBSD_version >= 500000
+ callout_init( &dev->timer, 1 );
+#else
+ callout_init( &dev->timer );
+#endif
+ dev->context_wait = 0;
+
+ dev->ctx_start = 0;
+ dev->lck_start = 0;
+
+ dev->buf_rp = dev->buf;
+ dev->buf_wp = dev->buf;
+ dev->buf_end = dev->buf + DRM_BSZ;
+ dev->buf_sigio = NULL;
+ dev->buf_readers = 0;
+ dev->buf_writers = 0;
+ dev->buf_selecting = 0;
+
+ DRM_DEBUG( "\n" );
+
+ /* The kernel's context could be created here, but is now created
+ * in drm_dma_enqueue. This is more resource-efficient for
+ * hardware that does not do DMA, but may mean that
+ * drm_select_queue fails between the time the interrupt is
+ * initialized and the time the queues are initialized.
+ */
+ DRIVER_POSTSETUP();
+ return 0;
+}
+
+
+static int DRM(takedown)( drm_device_t *dev )
+{
+ drm_magic_entry_t *pt, *next;
+ drm_map_t *map;
+ drm_map_list_entry_t *list;
+ drm_vma_entry_t *vma, *vma_next;
+ int i;
+
+ DRM_DEBUG( "\n" );
+
+ DRIVER_PRETAKEDOWN();
+#if __HAVE_DMA_IRQ
+ if ( dev->irq ) DRM(irq_uninstall)( dev );
+#endif
+
+ DRM_OS_LOCK;
+ callout_stop( &dev->timer );
+
+ if ( dev->devname ) {
+ DRM(free)( dev->devname, strlen( dev->devname ) + 1,
+ DRM_MEM_DRIVER );
+ dev->devname = NULL;
+ }
+
+ if ( dev->unique ) {
+ DRM(free)( dev->unique, strlen( dev->unique ) + 1,
+ DRM_MEM_DRIVER );
+ dev->unique = NULL;
+ dev->unique_len = 0;
+ }
+ /* Clear pid list */
+ for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
+ for ( pt = dev->magiclist[i].head ; pt ; pt = next ) {
+ next = pt->next;
+ DRM(free)( pt, sizeof(*pt), DRM_MEM_MAGIC );
+ }
+ dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
+ }
+
+#if __REALLY_HAVE_AGP
+ /* Clear AGP information */
+ if ( dev->agp ) {
+ drm_agp_mem_t *entry;
+ drm_agp_mem_t *nexte;
+
+ /* Remove AGP resources, but leave dev->agp
+ intact until drv_cleanup is called. */
+ for ( entry = dev->agp->memory ; entry ; entry = nexte ) {
+ nexte = entry->next;
+ if ( entry->bound ) DRM(unbind_agp)( entry->handle );
+ DRM(free_agp)( entry->handle, entry->pages );
+ DRM(free)( entry, sizeof(*entry), DRM_MEM_AGPLISTS );
+ }
+ dev->agp->memory = NULL;
+
+ if ( dev->agp->acquired ) DRM(agp_do_release)();
+
+ dev->agp->acquired = 0;
+ dev->agp->enabled = 0;
+ }
+#endif
+
+ /* Clear vma list (only built for debugging) */
+ if ( dev->vmalist ) {
+ for ( vma = dev->vmalist ; vma ; vma = vma_next ) {
+ vma_next = vma->next;
+ DRM(free)( vma, sizeof(*vma), DRM_MEM_VMAS );
+ }
+ dev->vmalist = NULL;
+ }
+
+ if( dev->maplist ) {
+ while ((list=TAILQ_FIRST(dev->maplist))) {
+ map = list->map;
+ switch ( map->type ) {
+ case _DRM_REGISTERS:
+ case _DRM_FRAME_BUFFER:
+#if __REALLY_HAVE_MTRR
+ if ( map->mtrr >= 0 ) {
+ int retcode;
+ retcode = mtrr_del( map->mtrr,
+ map->offset,
+ map->size );
+ DRM_DEBUG( "mtrr_del=%d\n", retcode );
+ }
+#endif
+ DRM(ioremapfree)( map->handle, map->size );
+ break;
+ case _DRM_SHM:
+ DRM(free_pages)((unsigned long)map->handle,
+ DRM(order)(map->size)
+ - PAGE_SHIFT,
+ DRM_MEM_SAREA);
+ break;
+
+ case _DRM_AGP:
+ /* Do nothing here, because this is all
+ * handled in the AGP/GART driver.
+ */
+ break;
+ case _DRM_SCATTER_GATHER:
+ /* Handle it, but do nothing, if REALLY_HAVE_SG
+ * isn't defined.
+ */
+#if __REALLY_HAVE_SG
+ if(dev->sg) {
+ DRM(sg_cleanup)(dev->sg);
+ dev->sg = NULL;
+ }
+#endif
+ break;
+ }
+ TAILQ_REMOVE(dev->maplist, list, link);
+ DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);
+ DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
+ }
+ DRM(free)(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
+ dev->maplist = NULL;
+ }
+
+#if __HAVE_DMA_QUEUE || __HAVE_MULTIPLE_DMA_QUEUES
+ if ( dev->queuelist ) {
+ for ( i = 0 ; i < dev->queue_count ; i++ ) {
+			if ( dev->queuelist[i] ) {
+				DRM(waitlist_destroy)( &dev->queuelist[i]->waitlist );
+ DRM(free)( dev->queuelist[i],
+ sizeof(*dev->queuelist[0]),
+ DRM_MEM_QUEUES );
+ dev->queuelist[i] = NULL;
+ }
+ }
+ DRM(free)( dev->queuelist,
+ dev->queue_slots * sizeof(*dev->queuelist),
+ DRM_MEM_QUEUES );
+ dev->queuelist = NULL;
+ }
+ dev->queue_count = 0;
+#endif
+
+#if __HAVE_DMA
+ DRM(dma_takedown)( dev );
+#endif
+ if ( dev->lock.hw_lock ) {
+ dev->lock.hw_lock = NULL; /* SHM removed */
+ dev->lock.pid = 0;
+ DRM_OS_WAKEUP_INT(&dev->lock.lock_queue);
+ }
+ DRM_OS_UNLOCK;
+
+ return 0;
+}
+
+/*
+ * Figure out how many instances to initialize.
+ */
+static int drm_count_cards(void)
+{
+ int num = 0;
+#if defined(DRIVER_CARD_LIST)
+ int i;
+ drm_pci_list_t *l;
+ u16 device, vendor;
+ struct pci_dev *pdev = NULL;
+#endif
+
+ DRM_DEBUG( "\n" );
+
+#if defined(DRIVER_COUNT_CARDS)
+ num = DRIVER_COUNT_CARDS();
+#elif defined(DRIVER_CARD_LIST)
+ for (i = 0, l = DRIVER_CARD_LIST; l[i].vendor != 0; i++) {
+ pdev = NULL;
+ vendor = l[i].vendor;
+ device = l[i].device;
+ if(device == 0xffff) device = PCI_ANY_ID;
+ if(vendor == 0xffff) vendor = PCI_ANY_ID;
+ while ((pdev = pci_find_device(vendor, device, pdev))) {
+ num++; /* FIXME: What about two cards of the same device id? */
+ }
+ }
+#else
+ num = DRIVER_NUM_CARDS;
+#endif
+ DRM_DEBUG("numdevs = %d\n", num);
+ return num;
+}
+
+/* DRM(init) is called from DRM(attach) when the newbus driver attaches the
+ * device at module load time.
+ */
+static int DRM(init)( device_t nbdev )
+{
+
+ drm_device_t *dev;
+ int i;
+#if __HAVE_CTX_BITMAP
+ int retcode;
+#endif
+ DRM_DEBUG( "\n" );
+
+#ifdef MODULE
+ DRM(parse_options)( drm_opts );
+#endif
+
+ DRM(numdevs) = drm_count_cards();
+ /* Force at least one instance. */
+ if (DRM(numdevs) <= 0)
+ DRM(numdevs) = 1;
+
+ DRM(device) = DRM_OS_MALLOC(sizeof(*DRM(device)) * DRM(numdevs));
+ if (!DRM(device)) {
+ DRM_OS_RETURN(ENOMEM);
+ }
+ DRM(minor) = DRM_OS_MALLOC(sizeof(*(DRM(minor))) * DRM(numdevs));
+ if (!DRM(minor)) {
+ DRM_OS_FREE(DRM(device));
+ DRM_OS_RETURN(ENOMEM);
+ }
+
+ DRIVER_PREINIT();
+
+
+ for (i = 0; i < DRM(numdevs); i++) {
+ int unit = device_get_unit(nbdev);
+ /* FIXME??? - multihead !!! */
+ dev = device_get_softc(nbdev);
+ memset( (void *)dev, 0, sizeof(*dev) );
+ DRM(minor)[i]=unit;
+ DRM_OS_SPININIT(dev->count_lock, "drm device");
+ lockinit(&dev->dev_lock, PZERO, "drmlk", 0, 0);
+ dev->device = nbdev;
+ dev->devnode = make_dev( &DRM(cdevsw),
+ unit,
+ DRM_DEV_UID,
+ DRM_DEV_GID,
+ DRM_DEV_MODE,
+ "dri/card%d", unit );
+ dev->name = DRIVER_NAME;
+ DRM(mem_init)();
+ DRM(sysctl_init)(dev);
+ TAILQ_INIT(&dev->files);
+
+#if __REALLY_HAVE_AGP
+ dev->agp = DRM(agp_init)();
+#if __MUST_HAVE_AGP
+ if ( dev->agp == NULL ) {
+ DRM_ERROR( "Cannot initialize the agpgart module.\n" );
+ DRM(sysctl_cleanup)( dev );
+ destroy_dev(dev->devnode);
+ DRM(takedown)( dev );
+ DRM_OS_RETURN(ENOMEM);
+ }
+#endif
+#if __REALLY_HAVE_MTRR
+ if (dev->agp)
+ dev->agp->agp_mtrr = mtrr_add( dev->agp->agp_info.aper_base,
+ dev->agp->agp_info.aper_size*1024*1024,
+ MTRR_TYPE_WRCOMB,
+ 1 );
+#endif
+#endif
+
+#if __HAVE_CTX_BITMAP
+ retcode = DRM(ctxbitmap_init)( dev );
+ if( retcode ) {
+ DRM_ERROR( "Cannot allocate memory for context bitmap.\n" );
+ DRM(sysctl_cleanup)( dev );
+ destroy_dev(dev->devnode);
+ DRM(takedown)( dev );
+ return retcode;
+ }
+#endif
+ DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d\n",
+ DRIVER_NAME,
+ DRIVER_MAJOR,
+ DRIVER_MINOR,
+ DRIVER_PATCHLEVEL,
+ DRIVER_DATE,
+ DRM(minor)[i] );
+ }
+
+ DRIVER_POSTINIT();
+
+ return 0;
+}
+
+/* DRM(cleanup) is called from DRM(detach) at module unload time.
+ */
+static void DRM(cleanup)(device_t nbdev)
+{
+ drm_device_t *dev;
+ int i;
+
+ DRM_DEBUG( "\n" );
+
+ for (i = DRM(numdevs) - 1; i >= 0; i--) {
+ /* FIXME??? - multihead */
+ dev = device_get_softc(nbdev);
+ DRM(sysctl_cleanup)( dev );
+ destroy_dev(dev->devnode);
+#if __HAVE_CTX_BITMAP
+ DRM(ctxbitmap_cleanup)( dev );
+#endif
+
+#if __REALLY_HAVE_AGP && __REALLY_HAVE_MTRR
+ if ( dev->agp && dev->agp->agp_mtrr >= 0) {
+ int retval;
+ retval = mtrr_del( dev->agp->agp_mtrr,
+ dev->agp->agp_info.aper_base,
+ dev->agp->agp_info.aper_size*1024*1024 );
+ DRM_DEBUG( "mtrr_del=%d\n", retval );
+ }
+#endif
+
+ DRM(takedown)( dev );
+
+#if __REALLY_HAVE_AGP
+ if ( dev->agp ) {
+ DRM(agp_uninit)();
+ DRM(free)( dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS );
+ dev->agp = NULL;
+ }
+#endif
+ }
+ DRIVER_POSTCLEANUP();
+ DRM_OS_FREE(DRM(minor));
+ DRM_OS_FREE(DRM(device));
+ DRM(numdevs) = 0;
+}
+
+
+int DRM(version)( DRM_OS_IOCTL )
+{
+ drm_version_t version;
+ int len;
+
+ DRM_OS_KRNFROMUSR( version, (drm_version_t *)data, sizeof(version) );
+
+#define DRM_COPY( name, value ) \
+ len = strlen( value ); \
+ if ( len > name##_len ) len = name##_len; \
+ name##_len = strlen( value ); \
+ if ( len && name ) { \
+ if ( DRM_OS_COPYTOUSR( name, value, len ) ) \
+ DRM_OS_RETURN(EFAULT); \
+ }
+
+ version.version_major = DRIVER_MAJOR;
+ version.version_minor = DRIVER_MINOR;
+ version.version_patchlevel = DRIVER_PATCHLEVEL;
+
+ DRM_COPY( version.name, DRIVER_NAME );
+ DRM_COPY( version.date, DRIVER_DATE );
+ DRM_COPY( version.desc, DRIVER_DESC );
+
+ DRM_OS_KRNTOUSR( (drm_version_t *)data, version, sizeof(version) );
+
+ return 0;
+}
+
+int DRM( open)(dev_t kdev, int flags, int fmt, DRM_OS_STRUCTPROC *p)
+{
+ drm_device_t *dev = NULL;
+ int retcode = 0;
+ int i;
+
+ for (i = 0; i < DRM(numdevs); i++) {
+ /* FIXME ??? - multihead */
+ dev = DRIVER_SOFTC(minor(kdev));
+ }
+ if (!dev) {
+ DRM_OS_RETURN(ENODEV);
+ }
+
+ DRM_DEBUG( "open_count = %d\n", dev->open_count );
+
+ device_busy(dev->device);
+ retcode = DRM(open_helper)(kdev, flags, fmt, p, dev);
+
+ if ( !retcode ) {
+ atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
+ DRM_OS_SPINLOCK( &dev->count_lock );
+ if ( !dev->open_count++ ) {
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+ return DRM(setup)( dev );
+ }
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+ }
+ device_unbusy(dev->device);
+
+ return retcode;
+}
+
+int DRM( close)(dev_t kdev, int flags, int fmt, DRM_OS_STRUCTPROC *p)
+{
+ drm_file_t *priv;
+ drm_device_t *dev = kdev->si_drv1;
+ int retcode = 0;
+
+ DRM_DEBUG( "open_count = %d\n", dev->open_count );
+ priv = DRM(find_file_by_proc)(dev, p);
+ if (!priv) {
+ DRM_DEBUG("can't find authenticator\n");
+ return EINVAL;
+ }
+
+ DRIVER_PRERELEASE();
+
+ /* ========================================================
+ * Begin inline drm_release
+ */
+
+ DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n",
+ DRM_OS_CURRENTPID, (long)dev->device, dev->open_count );
+
+ if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
+ && dev->lock.pid == DRM_OS_CURRENTPID) {
+ DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
+ DRM_OS_CURRENTPID,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+#if HAVE_DRIVER_RELEASE
+ DRIVER_RELEASE();
+#endif
+ DRM(lock_free)(dev,
+ &dev->lock.hw_lock->lock,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+
+ /* FIXME: may require heavy-handed reset of
+ hardware at this point, possibly
+ processed via a callback to the X
+ server. */
+ }
+#if __HAVE_RELEASE
+ else if ( dev->lock.hw_lock ) {
+ /* The lock is required to reclaim buffers */
+ for (;;) {
+ if ( !dev->lock.hw_lock ) {
+ /* Device has been unregistered */
+ retcode = EINTR;
+ break;
+ }
+ if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT ) ) {
+ dev->lock.pid = p->p_pid;
+ dev->lock.lock_time = jiffies;
+ atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
+ break; /* Got lock */
+ }
+ /* Contention */
+#if 0
+ atomic_inc( &dev->total_sleeps );
+#endif
+ retcode = tsleep(&dev->lock.lock_queue,
+ PZERO|PCATCH,
+ "drmlk2",
+ 0);
+ if (retcode)
+ break;
+ }
+ if( !retcode ) {
+ DRIVER_RELEASE();
+ DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT );
+ }
+ }
+#elif __HAVE_DMA
+ DRM(reclaim_buffers)( dev, priv->pid );
+#endif
+
+ funsetown(dev->buf_sigio);
+
+ DRM_OS_LOCK;
+ priv = DRM(find_file_by_proc)(dev, p);
+ if (priv) {
+ priv->refs--;
+ if (!priv->refs) {
+ TAILQ_REMOVE(&dev->files, priv, link);
+ }
+ }
+ DRM_OS_UNLOCK;
+
+ DRM(free)( priv, sizeof(*priv), DRM_MEM_FILES );
+
+ /* ========================================================
+ * End inline drm_release
+ */
+
+ atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
+ DRM_OS_SPINLOCK( &dev->count_lock );
+ if ( !--dev->open_count ) {
+ if ( atomic_read( &dev->ioctl_count ) || dev->blocked ) {
+ DRM_ERROR( "Device busy: %ld %d\n",
+ (unsigned long)atomic_read( &dev->ioctl_count ),
+ dev->blocked );
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+ DRM_OS_RETURN(EBUSY);
+ }
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+ device_unbusy(dev->device);
+ return DRM(takedown)( dev );
+ }
+ DRM_OS_SPINUNLOCK( &dev->count_lock );
+
+
+ DRM_OS_RETURN(retcode);
+}
+
+/* DRM(ioctl) is called whenever a process performs an ioctl on a DRM
+ * device node.
+ */
+int DRM(ioctl)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ int retcode = 0;
+ drm_ioctl_desc_t *ioctl;
+ d_ioctl_t *func;
+ int nr = DRM_IOCTL_NR(cmd);
+ DRM_OS_PRIV;
+
+ atomic_inc( &dev->ioctl_count );
+ atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] );
+ ++priv->ioctl_count;
+
+ DRM_DEBUG( "pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
+ DRM_OS_CURRENTPID, cmd, nr, (long)dev->device, priv->authenticated );
+
+ switch (cmd) {
+ case FIONBIO:
+ atomic_dec(&dev->ioctl_count);
+ return 0;
+
+ case FIOASYNC:
+ atomic_dec(&dev->ioctl_count);
+ dev->flags |= FASYNC;
+ return 0;
+
+ case FIOSETOWN:
+ atomic_dec(&dev->ioctl_count);
+ return fsetown(*(int *)data, &dev->buf_sigio);
+
+ case FIOGETOWN:
+ atomic_dec(&dev->ioctl_count);
+ *(int *) data = fgetown(dev->buf_sigio);
+ return 0;
+ }
+
+ if ( nr >= DRIVER_IOCTL_COUNT ) {
+ retcode = EINVAL;
+ } else {
+ ioctl = &DRM(ioctls)[nr];
+ func = ioctl->func;
+
+ if ( !func ) {
+ DRM_DEBUG( "no function\n" );
+ retcode = EINVAL;
+ } else if ( ( ioctl->root_only && DRM_OS_CHECKSUSER )
+ || ( ioctl->auth_needed && !priv->authenticated ) ) {
+ retcode = EACCES;
+ } else {
+ retcode = func( kdev, cmd, data, flags, p );
+ }
+ }
+
+ atomic_dec( &dev->ioctl_count );
+ DRM_OS_RETURN(retcode);
+}
+
+int DRM(lock)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_lock_t lock;
+ int ret = 0;
+#if __HAVE_MULTIPLE_DMA_QUEUES
+ drm_queue_t *q;
+#endif
+#if __HAVE_DMA_HISTOGRAM
+ cycles_t start;
+
+ dev->lck_start = start = get_cycles();
+#endif
+
+ DRM_OS_KRNFROMUSR( lock, (drm_lock_t *)data, sizeof(lock) );
+
+ if ( lock.context == DRM_KERNEL_CONTEXT ) {
+ DRM_ERROR( "Process %d using kernel context %d\n",
+ DRM_OS_CURRENTPID, lock.context );
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ DRM_DEBUG( "%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
+ lock.context, DRM_OS_CURRENTPID,
+ dev->lock.hw_lock->lock, lock.flags );
+
+#if __HAVE_DMA_QUEUE
+ if ( lock.context < 0 )
+ DRM_OS_RETURN(EINVAL);
+#elif __HAVE_MULTIPLE_DMA_QUEUES
+ if ( lock.context < 0 || lock.context >= dev->queue_count )
+ DRM_OS_RETURN(EINVAL);
+ q = dev->queuelist[lock.context];
+#endif
+
+#if __HAVE_DMA_FLUSH
+ ret = DRM(flush_block_and_flush)( dev, lock.context, lock.flags );
+#endif
+ if ( !ret ) {
+ for (;;) {
+ if ( !dev->lock.hw_lock ) {
+ /* Device has been unregistered */
+ ret = EINTR;
+ break;
+ }
+ if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
+ lock.context ) ) {
+ dev->lock.pid = DRM_OS_CURRENTPID;
+ dev->lock.lock_time = jiffies;
+ atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
+ break; /* Got lock */
+ }
+
+ /* Contention */
+ ret = tsleep(&dev->lock.lock_queue,
+ PZERO|PCATCH,
+ "drmlk2",
+ 0);
+ if (ret)
+ break;
+ }
+ }
+
+#if __HAVE_DMA_FLUSH
+ DRM(flush_unblock)( dev, lock.context, lock.flags ); /* cleanup phase */
+#endif
+
+ if ( !ret ) {
+
+#if __HAVE_DMA_READY
+ if ( lock.flags & _DRM_LOCK_READY ) {
+ DRIVER_DMA_READY();
+ }
+#endif
+#if __HAVE_DMA_QUIESCENT
+ if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
+ DRIVER_DMA_QUIESCENT();
+ }
+#endif
+#if __HAVE_KERNEL_CTX_SWITCH
+ if ( dev->last_context != lock.context ) {
+ DRM(context_switch)(dev, dev->last_context,
+ lock.context);
+ }
+#endif
+ }
+
+ DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
+
+#if __HAVE_DMA_HISTOGRAM
+ atomic_inc(&dev->histo.lacq[DRM(histogram_slot)(get_cycles()-start)]);
+#endif
+
+ DRM_OS_RETURN(ret);
+}
+
+
+int DRM(unlock)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_lock_t lock;
+
+ DRM_OS_KRNFROMUSR( lock, (drm_lock_t *)data, sizeof(lock) ) ;
+
+ if ( lock.context == DRM_KERNEL_CONTEXT ) {
+ DRM_ERROR( "Process %d using kernel context %d\n",
+ DRM_OS_CURRENTPID, lock.context );
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] );
+
+#if __HAVE_KERNEL_CTX_SWITCH
+ /* We no longer really hold it, but if we are the next
+ * agent to request it then we should just be able to
+ * take it immediately and not eat the ioctl.
+ */
+ dev->lock.pid = 0;
+ {
+ __volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
+ unsigned int old, new, prev, ctx;
+
+ ctx = lock.context;
+ do {
+ old = *plock;
+ new = ctx;
+ prev = cmpxchg(plock, old, new);
+ } while (prev != old);
+ }
+ wake_up_interruptible(&dev->lock.lock_queue);
+#else
+ DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT );
+#if __HAVE_DMA_SCHEDULE
+ DRM(dma_schedule)( dev, 1 );
+#endif
+
+ /* FIXME: Do we ever really need to check this???
+ */
+ if ( 1 /* !dev->context_flag */ ) {
+ if ( DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT ) ) {
+ DRM_ERROR( "\n" );
+ }
+ }
+#endif /* !__HAVE_KERNEL_CTX_SWITCH */
+
+ return 0;
+}
+
+#if DRM_LINUX
+static linux_ioctl_function_t DRM( linux_ioctl);
+static struct linux_ioctl_handler DRM( handler) = {DRM( linux_ioctl), LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};
+SYSINIT (DRM( register), SI_SUB_KLD, SI_ORDER_MIDDLE, linux_ioctl_register_handler, &DRM( handler));
+SYSUNINIT(DRM( unregister), SI_SUB_KLD, SI_ORDER_MIDDLE, linux_ioctl_unregister_handler, &DRM( handler));
+
+/*
+ * Linux emulation IOCTL
+ */
+static int
+DRM(linux_ioctl)(DRM_OS_STRUCTPROC *p, struct linux_ioctl_args* args)
+{
+#if (__FreeBSD_version >= 500000)
+ struct file *fp = p->td_proc->p_fd->fd_ofiles[args->fd];
+#else
+ struct file *fp = p->p_fd->fd_ofiles[args->fd];
+#endif
+ u_long cmd = args->cmd;
+ caddr_t data = (caddr_t) args->arg;
+ /*
+ * Pass the ioctl off to our standard handler.
+ */
+ return(fo_ioctl(fp, cmd, data, p));
+}
+#endif /* DRM_LINUX */
diff --git a/bsd/drm_fops.h b/bsd/drm_fops.h
new file mode 100644
index 00000000..53af39cd
--- /dev/null
+++ b/bsd/drm_fops.h
@@ -0,0 +1,223 @@
+/* drm_fops.h -- File operations for DRM -*- linux-c -*-
+ * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Daryll Strauss <daryll@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+
+#include <sys/signalvar.h>
+#include <sys/poll.h>
+
+drm_file_t *DRM(find_file_by_proc)(drm_device_t *dev, DRM_OS_STRUCTPROC *p)
+{
+#if __FreeBSD_version >= 500021
+ uid_t uid = p->td_proc->p_ucred->cr_svuid;
+ pid_t pid = p->td_proc->p_pid;
+#else
+ uid_t uid = p->p_cred->p_svuid;
+ pid_t pid = p->p_pid;
+#endif
+ drm_file_t *priv;
+
+ TAILQ_FOREACH(priv, &dev->files, link)
+ if (priv->pid == pid && priv->uid == uid)
+ return priv;
+ return NULL;
+}
+
+/* DRM(open_helper) does the work of DRM(open), which is called whenever a
+ * process opens a DRM device node. */
+
+int DRM(open_helper)(dev_t kdev, int flags, int fmt, DRM_OS_STRUCTPROC *p,
+ drm_device_t *dev)
+{
+ int m = minor(kdev);
+ drm_file_t *priv;
+
+ if (flags & O_EXCL)
+ return EBUSY; /* No exclusive opens */
+ dev->flags = flags;
+ if (!DRM(cpu_valid)())
+ DRM_OS_RETURN(EINVAL);
+
+ DRM_DEBUG("pid = %d, minor = %d\n", DRM_OS_CURRENTPID, m);
+
+ /* FIXME: linux mallocs and bzeros here */
+ priv = (drm_file_t *) DRM(find_file_by_proc)(dev, p);
+ if (priv) {
+ priv->refs++;
+ } else {
+ priv = (drm_file_t *) DRM(alloc)(sizeof(*priv), DRM_MEM_FILES);
+ bzero(priv, sizeof(*priv));
+#if __FreeBSD_version >= 500000
+ priv->uid = p->td_proc->p_ucred->cr_svuid;
+ priv->pid = p->td_proc->p_pid;
+#else
+ priv->uid = p->p_cred->p_svuid;
+ priv->pid = p->p_pid;
+#endif
+
+ priv->refs = 1;
+ priv->minor = m;
+ priv->devXX = dev;
+ priv->ioctl_count = 0;
+ priv->authenticated = !DRM_OS_CHECKSUSER;
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, p);
+ TAILQ_INSERT_TAIL(&dev->files, priv, link);
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, p);
+ }
+
+ kdev->si_drv1 = dev;
+
+
+ return 0;
+}
+
+
+/* The drm_read and drm_write_string code (especially that which manages
+   the circular buffer) is based on Alessandro Rubini's LINUX DEVICE
+   DRIVERS (Cambridge: O'Reilly, 1998), pages 111-113. */
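+
+/* A worked example of the ring arithmetic used below, assuming the DRM_BSZ
+   ring size from drmP.h is 1024: if buf_wp has advanced 100 bytes past
+   buf_rp, then
+
+       left  = (buf_rp + DRM_BSZ - buf_wp) % DRM_BSZ = 1024 - 100 = 924
+       avail = DRM_BSZ - left                        = 100
+
+   so "avail" is the number of bytes queued for readers and "left" is the
+   distance the writer may still advance before overtaking the reader.
+   left > 0 exactly when buf_rp != buf_wp, which is why DRM(poll) uses it
+   to test for readable data. */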
+
+ssize_t DRM(read)(dev_t kdev, struct uio *uio, int ioflag)
+{
+ DRM_OS_DEVICE;
+ int left;
+ int avail;
+ int send;
+ int cur;
+ int error = 0;
+
+ DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp);
+
+ while (dev->buf_rp == dev->buf_wp) {
+ DRM_DEBUG(" sleeping\n");
+ if (dev->flags & FASYNC)
+ return EWOULDBLOCK;
+ error = tsleep(&dev->buf_rp, PZERO|PCATCH, "drmrd", 0);
+ if (error) {
+ DRM_DEBUG(" interrupted\n");
+ return error;
+ }
+ DRM_DEBUG(" awake\n");
+ }
+
+ left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
+ avail = DRM_BSZ - left;
+ send = DRM_MIN(avail, uio->uio_resid);
+
+ while (send) {
+ if (dev->buf_wp > dev->buf_rp) {
+ cur = DRM_MIN(send, dev->buf_wp - dev->buf_rp);
+ } else {
+ cur = DRM_MIN(send, dev->buf_end - dev->buf_rp);
+ }
+ error = uiomove(dev->buf_rp, cur, uio);
+ if (error)
+ break;
+ dev->buf_rp += cur;
+ if (dev->buf_rp == dev->buf_end) dev->buf_rp = dev->buf;
+ send -= cur;
+ }
+
+ wakeup(&dev->buf_wp);
+ return error;
+}
+
+int DRM(write_string)(drm_device_t *dev, const char *s)
+{
+ int left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
+ int send = strlen(s);
+ int count;
+
+ DRM_DEBUG("%d left, %d to send (%p, %p)\n",
+ left, send, dev->buf_rp, dev->buf_wp);
+
+ if (left == 1 || dev->buf_wp != dev->buf_rp) {
+ DRM_ERROR("Buffer not empty (%d left, wp = %p, rp = %p)\n",
+ left,
+ dev->buf_wp,
+ dev->buf_rp);
+ }
+
+ while (send) {
+ if (dev->buf_wp >= dev->buf_rp) {
+ count = DRM_MIN(send, dev->buf_end - dev->buf_wp);
+ if (count == left) --count; /* Leave a hole */
+ } else {
+ count = DRM_MIN(send, dev->buf_rp - dev->buf_wp - 1);
+ }
+ strncpy(dev->buf_wp, s, count);
+ dev->buf_wp += count;
+ if (dev->buf_wp == dev->buf_end) dev->buf_wp = dev->buf;
+ send -= count;
+ }
+
+ if (dev->buf_selecting) {
+ dev->buf_selecting = 0;
+ selwakeup(&dev->buf_sel);
+ }
+
+ DRM_DEBUG("dev->buf_sigio=%p\n", dev->buf_sigio);
+ if (dev->buf_sigio) {
+ DRM_DEBUG("dev->buf_sigio->sio_pgid=%d\n", dev->buf_sigio->sio_pgid);
+ pgsigio(dev->buf_sigio, SIGIO, 0);
+ }
+ DRM_DEBUG("waking\n");
+ wakeup(&dev->buf_rp);
+
+ return 0;
+}
+
+int DRM(poll)(dev_t kdev, int events, DRM_OS_STRUCTPROC *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ int s;
+ int revents = 0;
+
+ s = spldrm();
+ if (events & (POLLIN | POLLRDNORM)) {
+ int left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
+ if (left > 0)
+ revents |= events & (POLLIN | POLLRDNORM);
+ else
+ selrecord(p, &dev->buf_sel);
+ }
+ splx(s);
+
+ return revents;
+}
+
+int DRM(write)(dev_t kdev, struct uio *uio, int ioflag)
+{
+ DRM_DEBUG("pid = %d, device = %p, open_count = %d\n",
+ curproc->p_pid, ((drm_device_t *)kdev->si_drv1)->device, ((drm_device_t *)kdev->si_drv1)->open_count);
+ return 0;
+}
diff --git a/bsd/drm_init.h b/bsd/drm_init.h
new file mode 100644
index 00000000..e2ab6080
--- /dev/null
+++ b/bsd/drm_init.h
@@ -0,0 +1,110 @@
+/* drm_init.h -- Setup/Cleanup for DRM -*- linux-c -*-
+ * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+#if 0 && DRM_DEBUG_CODE
+int DRM(flags) = DRM_FLAG_DEBUG;
+#else
+int DRM(flags) = 0;
+#endif
+
+/* drm_parse_option parses a single option. See description for
+ * drm_parse_options for details.
+ */
+static void DRM(parse_option)(char *s)
+{
+ char *c, *r;
+
+ DRM_DEBUG("\"%s\"\n", s);
+ if (!s || !*s) return;
+ for (c = s; *c && *c != ':'; c++); /* find : or \0 */
+ if (*c) r = c + 1; else r = NULL; /* remember remainder */
+ *c = '\0'; /* terminate */
+ if (!strcmp(s, "noctx")) {
+ DRM(flags) |= DRM_FLAG_NOCTX;
+ DRM_INFO("Server-mediated context switching OFF\n");
+ return;
+ }
+ if (!strcmp(s, "debug")) {
+ DRM(flags) |= DRM_FLAG_DEBUG;
+ DRM_INFO("Debug messages ON\n");
+ return;
+ }
+ DRM_ERROR("\"%s\" is not a valid option\n", s);
+ return;
+}
+
+/* drm_parse_options parses the "drm_opts=" option string supplied when the
+ * module is loaded.  The grammar of the format is as follows:
+ *
+ * drm ::= 'drm_opts=' option_list
+ * option_list ::= option [ ';' option_list ]
+ * option ::= 'debug'
+ *          | 'noctx'
+ *
+ * Note that 's' contains option_list without the 'drm_opts=' part.
+ *
+ * 'debug' turns debugging messages on
+ * 'noctx' turns server-mediated context switching off
+ *
+ */
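+
+/* Example (illustrative only): if the module is loaded with the option
+ * string drm_opts="debug;noctx", DRM(parse_options) receives "debug;noctx",
+ * splits it at ';', and passes "debug" and then "noctx" to
+ * DRM(parse_option), setting both DRM_FLAG_DEBUG and DRM_FLAG_NOCTX in
+ * DRM(flags).
+ */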
+
+void DRM(parse_options)(char *s)
+{
+ char *h, *t, *n;
+
+ DRM_DEBUG("\"%s\"\n", s ?: "");
+ if (!s || !*s) return;
+
+ for (h = t = n = s; h && *h; h = n) {
+ for (; *t && *t != ';'; t++); /* find ; or \0 */
+ if (*t) n = t + 1; else n = NULL; /* remember next */
+ *t = '\0'; /* terminate */
+ DRM(parse_option)(h); /* parse */
+ }
+}
+
+/* drm_cpu_valid returns non-zero if the DRI will run on this CPU, and 0
+ * otherwise.
+ */
+int DRM(cpu_valid)(void)
+{
+ return 1;
+}
diff --git a/bsd/drm_ioctl.h b/bsd/drm_ioctl.h
new file mode 100644
index 00000000..1e8281e6
--- /dev/null
+++ b/bsd/drm_ioctl.h
@@ -0,0 +1,237 @@
+/* drm_ioctl.h -- IOCTL processing for DRM -*- linux-c -*-
+ * Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include <sys/bus.h>
+#include <pci/pcivar.h>
+
+int DRM(irq_busid)( DRM_OS_IOCTL )
+{
+ drm_irq_busid_t id;
+ devclass_t pci;
+ device_t bus, dev;
+ device_t *kids;
+ int error, i, num_kids;
+
+ DRM_OS_KRNFROMUSR( id, (drm_irq_busid_t *)data, sizeof(id) );
+
+ pci = devclass_find("pci");
+ if (!pci)
+ return ENOENT;
+ bus = devclass_get_device(pci, id.busnum);
+ if (!bus)
+ return ENOENT;
+ error = device_get_children(bus, &kids, &num_kids);
+ if (error)
+ return error;
+
+ dev = 0;
+ for (i = 0; i < num_kids; i++) {
+ dev = kids[i];
+ if (pci_get_slot(dev) == id.devnum
+ && pci_get_function(dev) == id.funcnum)
+ break;
+ }
+
+ free(kids, M_TEMP);
+
+ if (i != num_kids)
+ id.irq = pci_get_irq(dev);
+ else
+ id.irq = 0;
+ DRM_DEBUG("%d:%d:%d => IRQ %d\n",
+ id.busnum, id.devnum, id.funcnum, id.irq);
+
+ DRM_OS_KRNTOUSR( (drm_irq_busid_t *)data, id, sizeof(id) );
+
+ return 0;
+}
+
+int DRM(getunique)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_unique_t u;
+
+ DRM_OS_KRNFROMUSR( u, (drm_unique_t *)data, sizeof(u) );
+
+ if (u.unique_len >= dev->unique_len) {
+ if (DRM_OS_COPYTOUSR(u.unique, dev->unique, dev->unique_len))
+ DRM_OS_RETURN(EFAULT);
+ }
+ u.unique_len = dev->unique_len;
+
+ DRM_OS_KRNTOUSR( (drm_unique_t *)data, u, sizeof(u) );
+
+ return 0;
+}
+
+int DRM(setunique)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_unique_t u;
+
+ if (dev->unique_len || dev->unique)
+ DRM_OS_RETURN(EBUSY);
+
+ DRM_OS_KRNFROMUSR( u, (drm_unique_t *)data, sizeof(u) );
+
+ if (!u.unique_len || u.unique_len > 1024)
+ DRM_OS_RETURN(EINVAL);
+
+ dev->unique_len = u.unique_len;
+ dev->unique = DRM(alloc)(u.unique_len + 1, DRM_MEM_DRIVER);
+
+ if(!dev->unique) DRM_OS_RETURN(ENOMEM);
+
+ if (DRM_OS_COPYFROMUSR(dev->unique, u.unique, dev->unique_len))
+ DRM_OS_RETURN(EFAULT);
+
+ dev->unique[dev->unique_len] = '\0';
+
+ dev->devname = DRM(alloc)(strlen(dev->name) + strlen(dev->unique) + 2,
+ DRM_MEM_DRIVER);
+	if(!dev->devname)
+		DRM_OS_RETURN(ENOMEM);
+ sprintf(dev->devname, "%s@%s", dev->name, dev->unique);
+
+
+ return 0;
+}
+
+
+int DRM(getmap)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_map_t map;
+ drm_map_t *mapinlist;
+ drm_map_list_entry_t *list;
+ int idx;
+ int i = 0;
+
+ DRM_OS_KRNFROMUSR( map, (drm_map_t *)data, sizeof(map) );
+
+ idx = map.offset;
+
+ DRM_OS_LOCK;
+ if (idx < 0 || idx >= dev->map_count) {
+ DRM_OS_UNLOCK;
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ TAILQ_FOREACH(list, dev->maplist, link) {
+ mapinlist = list->map;
+ if (i==idx) {
+ map.offset = mapinlist->offset;
+ map.size = mapinlist->size;
+ map.type = mapinlist->type;
+ map.flags = mapinlist->flags;
+ map.handle = mapinlist->handle;
+ map.mtrr = mapinlist->mtrr;
+ break;
+ }
+ i++;
+ }
+
+ DRM_OS_UNLOCK;
+
+ if (!list)
+ return EINVAL;
+
+ DRM_OS_KRNTOUSR( (drm_map_t *)data, map, sizeof(map) );
+
+ return 0;
+}
+
+int DRM(getclient)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_client_t client;
+ drm_file_t *pt;
+ int idx;
+ int i = 0;
+
+ DRM_OS_KRNFROMUSR( client, (drm_client_t *)data, sizeof(client) );
+
+ idx = client.idx;
+ DRM_OS_LOCK;
+ TAILQ_FOREACH(pt, &dev->files, link) {
+ if (i==idx)
+ {
+ client.auth = pt->authenticated;
+ client.pid = pt->pid;
+ client.uid = pt->uid;
+ client.magic = pt->magic;
+ client.iocs = pt->ioctl_count;
+ DRM_OS_UNLOCK;
+
+ *(drm_client_t *)data = client;
+ return 0;
+ }
+ i++;
+ }
+ DRM_OS_UNLOCK;
+
+ DRM_OS_KRNTOUSR( (drm_client_t *)data, client, sizeof(client) );
+
+ return 0;
+}
+
+int DRM(getstats)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ drm_stats_t stats;
+ int i;
+
+ memset(&stats, 0, sizeof(stats));
+
+ DRM_OS_LOCK;
+
+ for (i = 0; i < dev->counters; i++) {
+ if (dev->types[i] == _DRM_STAT_LOCK)
+ stats.data[i].value
+ = (dev->lock.hw_lock
+ ? dev->lock.hw_lock->lock : 0);
+ else
+ stats.data[i].value = atomic_read(&dev->counts[i]);
+ stats.data[i].type = dev->types[i];
+ }
+
+ stats.count = dev->counters;
+
+ DRM_OS_UNLOCK;
+
+ DRM_OS_KRNTOUSR( (drm_stats_t *)data, stats, sizeof(stats) );
+
+ return 0;
+}
diff --git a/bsd/drm_lists.h b/bsd/drm_lists.h
new file mode 100644
index 00000000..ea6d34a7
--- /dev/null
+++ b/bsd/drm_lists.h
@@ -0,0 +1,236 @@
+/* drm_lists.h -- Buffer list handling routines -*- linux-c -*-
+ * Created: Mon Apr 19 20:54:22 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+#if __HAVE_DMA_WAITLIST
+
+int DRM(waitlist_create)(drm_waitlist_t *bl, int count)
+{
+ if (bl->count)
+ DRM_OS_RETURN( EINVAL );
+
+	bl->bufs = DRM(alloc)((count + 2) * sizeof(*bl->bufs),
+ DRM_MEM_BUFLISTS);
+
+ if(!bl->bufs) DRM_OS_RETURN(ENOMEM);
+
+ bl->count = count;
+ bl->rp = bl->bufs;
+ bl->wp = bl->bufs;
+ bl->end = &bl->bufs[bl->count+1];
+ DRM_OS_SPININIT( bl->write_lock, "writelock" );
+ DRM_OS_SPININIT( bl->read_lock, "readlock" );
+ return 0;
+}
+
+int DRM(waitlist_destroy)(drm_waitlist_t *bl)
+{
+ if (bl->rp != bl->wp)
+ DRM_OS_RETURN( EINVAL );
+ if (bl->bufs) DRM(free)(bl->bufs,
+ (bl->count + 2) * sizeof(*bl->bufs),
+ DRM_MEM_BUFLISTS);
+ bl->count = 0;
+ bl->bufs = NULL;
+ bl->rp = NULL;
+ bl->wp = NULL;
+ bl->end = NULL;
+ return 0;
+}
+
+int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf)
+{
+ int left;
+ int s;
+ left = DRM_LEFTCOUNT(bl);
+ if (!left) {
+ DRM_ERROR("Overflow while adding buffer %d from pid %d\n",
+ buf->idx, buf->pid);
+ DRM_OS_RETURN( EINVAL );
+ }
+#if __HAVE_DMA_HISTOGRAM
+ getnanotime(&buf->time_queued);
+#endif
+ buf->list = DRM_LIST_WAIT;
+
+ DRM_OS_SPINLOCK(&bl->write_lock);
+ s = spldrm();
+ *bl->wp = buf;
+ if (++bl->wp >= bl->end) bl->wp = bl->bufs;
+ splx(s);
+ DRM_OS_SPINUNLOCK(&bl->write_lock);
+
+ return 0;
+}
+
+drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl)
+{
+ drm_buf_t *buf;
+ int s;
+
+ DRM_OS_SPINLOCK(&bl->read_lock);
+ s = spldrm();
+ buf = *bl->rp;
+ if (bl->rp == bl->wp) {
+ splx(s);
+ DRM_OS_SPINUNLOCK(&bl->read_lock);
+ return NULL;
+ }
+ if (++bl->rp >= bl->end) bl->rp = bl->bufs;
+ splx(s);
+ DRM_OS_SPINUNLOCK(&bl->read_lock);
+
+ return buf;
+}
+
+#endif /* __HAVE_DMA_WAITLIST */
+
+
+#if __HAVE_DMA_FREELIST
+
+int DRM(freelist_create)(drm_freelist_t *bl, int count)
+{
+ atomic_set(&bl->count, 0);
+ bl->next = NULL;
+ bl->waiting = 0;
+
+ bl->low_mark = 0;
+ bl->high_mark = 0;
+ atomic_set(&bl->wfh, 0);
+ DRM_OS_SPININIT( bl->lock, "freelistlock" );
+ ++bl->initialized;
+ return 0;
+}
+
+int DRM(freelist_destroy)(drm_freelist_t *bl)
+{
+ atomic_set(&bl->count, 0);
+ bl->next = NULL;
+ return 0;
+}
+
+int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl, drm_buf_t *buf)
+{
+ drm_device_dma_t *dma = dev->dma;
+
+ if (!dma) {
+ DRM_ERROR("No DMA support\n");
+ return 1;
+ }
+
+ if (buf->waiting || buf->pending || buf->list == DRM_LIST_FREE) {
+ DRM_ERROR("Freed buffer %d: w%d, p%d, l%d\n",
+ buf->idx, buf->waiting, buf->pending, buf->list);
+ }
+ if (!bl) return 1;
+#if __HAVE_DMA_HISTOGRAM
+ getnanotime(&buf->time_queued);
+ DRM(histogram_compute)(dev, buf);
+#endif
+ buf->list = DRM_LIST_FREE;
+
+ DRM_OS_SPINLOCK( &bl->lock );
+ buf->next = bl->next;
+ bl->next = buf;
+ DRM_OS_SPINUNLOCK( &bl->lock );
+
+ atomic_inc(&bl->count);
+ if (atomic_read(&bl->count) > dma->buf_count) {
+ DRM_ERROR("%ld of %d buffers free after addition of %d\n",
+ (unsigned long)atomic_read(&bl->count),
+ dma->buf_count, buf->idx);
+ return 1;
+ }
+ /* Check for high water mark */
+ if (atomic_read(&bl->wfh) && atomic_read(&bl->count)>=bl->high_mark) {
+ atomic_set(&bl->wfh, 0);
+ DRM_OS_WAKEUP_INT(&bl->waiting);
+ }
+ return 0;
+}
+
+static drm_buf_t *DRM(freelist_try)(drm_freelist_t *bl)
+{
+ drm_buf_t *buf;
+
+ if (!bl) return NULL;
+
+ /* Get buffer */
+ DRM_OS_SPINLOCK(&bl->lock);
+ if (!bl->next) {
+ DRM_OS_SPINUNLOCK(&bl->lock);
+ return NULL;
+ }
+ buf = bl->next;
+ bl->next = bl->next->next;
+ DRM_OS_SPINUNLOCK(&bl->lock);
+
+ atomic_dec(&bl->count);
+ buf->next = NULL;
+ buf->list = DRM_LIST_NONE;
+ if (buf->waiting || buf->pending) {
+ DRM_ERROR("Free buffer %d: w%d, p%d, l%d\n",
+ buf->idx, buf->waiting, buf->pending, buf->list);
+ }
+
+ return buf;
+}
+
+drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block)
+{
+ drm_buf_t *buf = NULL;
+ int error;
+
+ if (!bl || !bl->initialized) return NULL;
+
+ /* Check for low water mark */
+ if (atomic_read(&bl->count) <= bl->low_mark) /* Became low */
+ atomic_set(&bl->wfh, 1);
+ if (atomic_read(&bl->wfh)) {
+ if (block) {
+ for (;;) {
+ if (!atomic_read(&bl->wfh)
+				    && (buf = DRM(freelist_try)(bl))) break;
+ error = tsleep(&bl->waiting, PZERO|PCATCH,
+ "drmfg", 0);
+ if (error)
+ break;
+ }
+ }
+ return buf;
+ }
+
+ return DRM(freelist_try)(bl);
+}
+
+#endif /* __HAVE_DMA_FREELIST */
diff --git a/bsd/drm_lock.h b/bsd/drm_lock.h
new file mode 100644
index 00000000..863a228c
--- /dev/null
+++ b/bsd/drm_lock.h
@@ -0,0 +1,244 @@
+/* drm_lock.h -- IOCTLs for locking -*- linux-c -*-
+ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+
+int DRM(block)( DRM_OS_IOCTL )
+{
+ DRM_DEBUG("\n");
+ return 0;
+}
+
+int DRM(unblock)( DRM_OS_IOCTL )
+{
+ DRM_DEBUG("\n");
+ return 0;
+}
+
+int DRM(lock_take)(__volatile__ unsigned int *lock, unsigned int context)
+{
+ unsigned int old, new;
+
+ char failed;
+
+ do {
+ old = *lock;
+ if (old & _DRM_LOCK_HELD) new = old | _DRM_LOCK_CONT;
+ else new = context | _DRM_LOCK_HELD;
+ _DRM_CAS(lock, old, new, failed);
+ } while (failed);
+ if (_DRM_LOCKING_CONTEXT(old) == context) {
+ if (old & _DRM_LOCK_HELD) {
+ if (context != DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("%d holds heavyweight lock\n",
+ context);
+ }
+ return 0;
+ }
+ }
+ if (new == (context | _DRM_LOCK_HELD)) {
+ /* Have lock */
+ return 1;
+ }
+ return 0;
+}
+
+/* This takes a lock forcibly and hands it to context. Should ONLY be used
+ inside *_unlock to give lock to kernel before calling *_dma_schedule. */
+int DRM(lock_transfer)(drm_device_t *dev,
+ __volatile__ unsigned int *lock, unsigned int context)
+{
+ unsigned int old, new;
+ char failed;
+
+ dev->lock.pid = 0;
+ do {
+ old = *lock;
+ new = context | _DRM_LOCK_HELD;
+ _DRM_CAS(lock, old, new, failed);
+ } while (failed);
+ return 1;
+}
+
+int DRM(lock_free)(drm_device_t *dev,
+ __volatile__ unsigned int *lock, unsigned int context)
+{
+ unsigned int old, new;
+ pid_t pid = dev->lock.pid;
+ char failed;
+
+ dev->lock.pid = 0;
+ do {
+ old = *lock;
+ new = 0;
+ _DRM_CAS(lock, old, new, failed);
+ } while (failed);
+ if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
+ DRM_ERROR("%d freed heavyweight lock held by %d (pid %d)\n",
+ context,
+ _DRM_LOCKING_CONTEXT(old),
+ pid);
+ return 1;
+ }
+ DRM_OS_WAKEUP_INT(&dev->lock.lock_queue);
+ return 0;
+}
+
+static int DRM(flush_queue)(drm_device_t *dev, int context)
+{
+ int error;
+ int ret = 0;
+ drm_queue_t *q = dev->queuelist[context];
+
+ DRM_DEBUG("\n");
+
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->use_count) > 1) {
+ atomic_inc(&q->block_write);
+ atomic_inc(&q->block_count);
+ error = tsleep(&q->flush_queue, PZERO|PCATCH, "drmfq", 0);
+ if (error)
+ return error;
+ atomic_dec(&q->block_count);
+ }
+ atomic_dec(&q->use_count);
+
+ /* NOTE: block_write is still incremented!
+ Use drm_flush_unlock_queue to decrement. */
+ return ret;
+}
+
+static int DRM(flush_unblock_queue)(drm_device_t *dev, int context)
+{
+ drm_queue_t *q = dev->queuelist[context];
+
+ DRM_DEBUG("\n");
+
+ atomic_inc(&q->use_count);
+ if (atomic_read(&q->use_count) > 1) {
+ if (atomic_read(&q->block_write)) {
+ atomic_dec(&q->block_write);
+ DRM_OS_WAKEUP_INT(&q->write_queue);
+ }
+ }
+ atomic_dec(&q->use_count);
+ return 0;
+}
+
+int DRM(flush_block_and_flush)(drm_device_t *dev, int context,
+ drm_lock_flags_t flags)
+{
+ int ret = 0;
+ int i;
+
+ DRM_DEBUG("\n");
+
+ if (flags & _DRM_LOCK_FLUSH) {
+ ret = DRM(flush_queue)(dev, DRM_KERNEL_CONTEXT);
+ if (!ret) ret = DRM(flush_queue)(dev, context);
+ }
+ if (flags & _DRM_LOCK_FLUSH_ALL) {
+ for (i = 0; !ret && i < dev->queue_count; i++) {
+ ret = DRM(flush_queue)(dev, i);
+ }
+ }
+ return ret;
+}
+
+int DRM(flush_unblock)(drm_device_t *dev, int context, drm_lock_flags_t flags)
+{
+ int ret = 0;
+ int i;
+
+ DRM_DEBUG("\n");
+
+ if (flags & _DRM_LOCK_FLUSH) {
+ ret = DRM(flush_unblock_queue)(dev, DRM_KERNEL_CONTEXT);
+ if (!ret) ret = DRM(flush_unblock_queue)(dev, context);
+ }
+ if (flags & _DRM_LOCK_FLUSH_ALL) {
+ for (i = 0; !ret && i < dev->queue_count; i++) {
+ ret = DRM(flush_unblock_queue)(dev, i);
+ }
+ }
+
+ return ret;
+}
+
+int DRM(finish)( DRM_OS_IOCTL )
+{
+ DRM_OS_DEVICE;
+ int ret = 0;
+ drm_lock_t lock;
+
+ DRM_DEBUG("\n");
+
+ DRM_OS_KRNFROMUSR( lock, (drm_lock_t *)data, sizeof(lock) );
+
+ ret = DRM(flush_block_and_flush)(dev, lock.context, lock.flags);
+ DRM(flush_unblock)(dev, lock.context, lock.flags);
+ return ret;
+}
+
+/* If we get here, it means that the process has called DRM_IOCTL_LOCK
+ without calling DRM_IOCTL_UNLOCK.
+
+ If the lock is not held, then let the signal proceed as usual.
+
+ If the lock is held, then set the contended flag and keep the signal
+ blocked.
+
+
+ Return 1 if the signal should be delivered normally.
+ Return 0 if the signal should be blocked. */
+
+int DRM(notifier)(void *priv)
+{
+ drm_sigdata_t *s = (drm_sigdata_t *)priv;
+ unsigned int old, new;
+ char failed;
+
+
+ /* Allow signal delivery if lock isn't held */
+ if (!_DRM_LOCK_IS_HELD(s->lock->lock)
+ || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context) return 1;
+
+ /* Otherwise, set flag to force call to
+ drmUnlock */
+ do {
+ old = s->lock->lock;
+ new = old | _DRM_LOCK_CONT;
+ _DRM_CAS(&s->lock->lock, old, new, failed);
+ } while (failed);
+ return 0;
+}
+
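The notifier and the lock paths above all operate on a single lock word: the holder's context ID in the low bits, plus "held" and "contended" flag bits updated atomically with _DRM_CAS(). A rough user-space sketch of that convention, with GCC's __sync builtin standing in for the inline cmpxchg and locally defined flag values that are assumptions rather than copies of drm.h:

/* take_lock() mirrors the shape of DRM(lock_take)() above, minus the
 * kernel-context special case. */
#include <stdio.h>

#define LOCK_HELD 0x80000000U		/* assumed flag values */
#define LOCK_CONT 0x40000000U

static int take_lock(volatile unsigned int *lock, unsigned int context)
{
	unsigned int old, new;

	do {
		old = *lock;
		if (old & LOCK_HELD)
			new = old | LOCK_CONT;		/* already owned: mark contention */
		else
			new = context | LOCK_HELD;	/* free: try to become the owner */
	} while (__sync_val_compare_and_swap(lock, old, new) != old);

	return new == (context | LOCK_HELD);	/* 1 if we now hold the lock */
}

int main(void)
{
	volatile unsigned int lock = 0;

	printf("first take:  %d\n", take_lock(&lock, 3));	/* acquired */
	printf("second take: %d\n", take_lock(&lock, 4));	/* contended */
	return 0;
}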
diff --git a/bsd/drm_memory.h b/bsd/drm_memory.h
new file mode 100644
index 00000000..605b42ae
--- /dev/null
+++ b/bsd/drm_memory.h
@@ -0,0 +1,433 @@
+/* drm_memory.h -- Memory management wrappers for DRM -*- linux-c -*-
+ * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#if __REALLY_HAVE_AGP
+#include <sys/agpio.h>
+#endif
+
+#define malloctype DRM(M_DRM)
+/* The macros conflicted in the MALLOC_DEFINE */
+
+MALLOC_DEFINE(malloctype, "drm", "DRM Data Structures");
+
+#undef malloctype
+
+typedef struct drm_mem_stats {
+ const char *name;
+ int succeed_count;
+ int free_count;
+ int fail_count;
+ unsigned long bytes_allocated;
+ unsigned long bytes_freed;
+} drm_mem_stats_t;
+
+static DRM_OS_SPINTYPE DRM(mem_lock);
+static unsigned long DRM(ram_available) = 0; /* In pages */
+static unsigned long DRM(ram_used) = 0;
+static drm_mem_stats_t DRM(mem_stats)[] = {
+ [DRM_MEM_DMA] = { "dmabufs" },
+ [DRM_MEM_SAREA] = { "sareas" },
+ [DRM_MEM_DRIVER] = { "driver" },
+ [DRM_MEM_MAGIC] = { "magic" },
+ [DRM_MEM_IOCTLS] = { "ioctltab" },
+ [DRM_MEM_MAPS] = { "maplist" },
+ [DRM_MEM_VMAS] = { "vmalist" },
+ [DRM_MEM_BUFS] = { "buflist" },
+ [DRM_MEM_SEGS] = { "seglist" },
+ [DRM_MEM_PAGES] = { "pagelist" },
+ [DRM_MEM_FILES] = { "files" },
+ [DRM_MEM_QUEUES] = { "queues" },
+ [DRM_MEM_CMDS] = { "commands" },
+ [DRM_MEM_MAPPINGS] = { "mappings" },
+ [DRM_MEM_BUFLISTS] = { "buflists" },
+ [DRM_MEM_AGPLISTS] = { "agplist" },
+ [DRM_MEM_SGLISTS] = { "sglist" },
+ [DRM_MEM_TOTALAGP] = { "totalagp" },
+ [DRM_MEM_BOUNDAGP] = { "boundagp" },
+ [DRM_MEM_CTXBITMAP] = { "ctxbitmap"},
+ [DRM_MEM_STUB] = { "stub" },
+ { NULL, 0, } /* Last entry must be null */
+};
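The table above leans on C designated initializers so each DRM_MEM_* area names its own slot, with a NULL name as the end-of-list sentinel. A minimal stand-alone illustration of the same pattern, using made-up area names:

#include <stdio.h>

enum { AREA_BUFS, AREA_MAPS, AREA_LAST };

static struct { const char *name; int allocs; } stats[] = {
	[AREA_BUFS] = { "buflist" },
	[AREA_MAPS] = { "maplist" },
	[AREA_LAST] = { NULL },			/* last entry must be NULL */
};

int main(void)
{
	stats[AREA_BUFS].allocs++;
	for (int i = 0; stats[i].name; i++)	/* walk until the sentinel */
		printf("%-8s %d\n", stats[i].name, stats[i].allocs);
	return 0;
}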
+
+void DRM(mem_init)(void)
+{
+ drm_mem_stats_t *mem;
+
+ DRM_OS_SPININIT(DRM(mem_lock), "drm memory");
+
+ for (mem = DRM(mem_stats); mem->name; ++mem) {
+ mem->succeed_count = 0;
+ mem->free_count = 0;
+ mem->fail_count = 0;
+ mem->bytes_allocated = 0;
+ mem->bytes_freed = 0;
+ }
+
+ DRM(ram_available) = 0; /* si.totalram */
+ DRM(ram_used) = 0;
+}
+
+/* drm_mem_info is called whenever a process reads the hw.dri.N.mem sysctl. */
+
+static int DRM(_mem_info) DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_mem_stats_t *pt;
+ char buf[128];
+ int error;
+
+ DRM_SYSCTL_PRINT(" total counts "
+ " | outstanding \n");
+ DRM_SYSCTL_PRINT("type alloc freed fail bytes freed"
+ " | allocs bytes\n\n");
+ DRM_SYSCTL_PRINT("%-9.9s %5d %5d %4d %10lu |\n",
+ "system", 0, 0, 0, DRM(ram_available));
+ DRM_SYSCTL_PRINT("%-9.9s %5d %5d %4d %10lu |\n",
+ "locked", 0, 0, 0, DRM(ram_used));
+ DRM_SYSCTL_PRINT("\n");
+ for (pt = DRM(mem_stats); pt->name; pt++) {
+ DRM_SYSCTL_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
+ pt->name,
+ pt->succeed_count,
+ pt->free_count,
+ pt->fail_count,
+ pt->bytes_allocated,
+ pt->bytes_freed,
+ pt->succeed_count - pt->free_count,
+ (long)pt->bytes_allocated
+ - (long)pt->bytes_freed);
+ }
+ SYSCTL_OUT(req, "", 1);
+
+ return 0;
+}
+
+int DRM(mem_info) DRM_SYSCTL_HANDLER_ARGS
+{
+ int ret;
+
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ ret = DRM(_mem_info)(oidp, arg1, arg2, req);
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ return ret;
+}
+
+void *DRM(alloc)(size_t size, int area)
+{
+ void *pt;
+
+ if (!size) {
+ DRM_MEM_ERROR(area, "Allocating 0 bytes\n");
+ return NULL;
+ }
+
+ if (!(pt = malloc(size, DRM(M_DRM), M_NOWAIT))) {
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ ++DRM(mem_stats)[area].fail_count;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ return NULL;
+ }
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ ++DRM(mem_stats)[area].succeed_count;
+ DRM(mem_stats)[area].bytes_allocated += size;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ return pt;
+}
+
+void *DRM(realloc)(void *oldpt, size_t oldsize, size_t size, int area)
+{
+ void *pt;
+
+ if (!(pt = DRM(alloc)(size, area))) return NULL;
+ if (oldpt && oldsize) {
+ memcpy(pt, oldpt, oldsize);
+ DRM(free)(oldpt, oldsize, area);
+ }
+ return pt;
+}
+
+char *DRM(strdup)(const char *s, int area)
+{
+ char *pt;
+ int length = s ? strlen(s) : 0;
+
+ if (!(pt = DRM(alloc)(length+1, area))) return NULL;
+	if (s) strcpy(pt, s); else pt[0] = '\0';
+ return pt;
+}
+
+void DRM(strfree)(char *s, int area)
+{
+ unsigned int size;
+
+ if (!s) return;
+
+ size = 1 + strlen(s);
+ DRM(free)((void *)s, size, area);
+}
+
+void DRM(free)(void *pt, size_t size, int area)
+{
+ int alloc_count;
+ int free_count;
+
+ if (!pt) DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
+ else free(pt, DRM(M_DRM));
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ DRM(mem_stats)[area].bytes_freed += size;
+ free_count = ++DRM(mem_stats)[area].free_count;
+ alloc_count = DRM(mem_stats)[area].succeed_count;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ if (free_count > alloc_count) {
+ DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
+ free_count, alloc_count);
+ }
+}
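DRM(alloc) and DRM(free) keep per-area counters so leaks and double frees show up in the mem statistics. A tiny single-threaded model of that bookkeeping, with the spinlock omitted:

#include <stddef.h>
#include <stdio.h>

static struct { int allocs, frees; unsigned long bytes_in, bytes_out; } acct;

static void note_alloc(size_t n) { acct.allocs++; acct.bytes_in += n; }

static void note_free(size_t n)
{
	acct.frees++;
	acct.bytes_out += n;
	if (acct.frees > acct.allocs)
		fprintf(stderr, "Excess frees: %d frees, %d allocs\n",
			acct.frees, acct.allocs);
}

int main(void)
{
	note_alloc(256);
	note_free(256);
	note_free(256);			/* triggers the excess-frees warning */
	printf("outstanding bytes: %ld\n",
	       (long)acct.bytes_in - (long)acct.bytes_out);
	return 0;
}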
+
+unsigned long DRM(alloc_pages)(int order, int area)
+{
+ vm_offset_t address;
+ unsigned long bytes = PAGE_SIZE << order;
+
+
+ address = (vm_offset_t) contigmalloc(bytes, DRM(M_DRM), M_WAITOK, 0, ~0, 1, 0);
+ if (!address) {
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ ++DRM(mem_stats)[area].fail_count;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ return 0;
+ }
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ ++DRM(mem_stats)[area].succeed_count;
+ DRM(mem_stats)[area].bytes_allocated += bytes;
+ DRM(ram_used) += bytes;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+
+
+ /* Zero outside the lock */
+ memset((void *)address, 0, bytes);
+
+
+ return address;
+}
+
+void DRM(free_pages)(unsigned long address, int order, int area)
+{
+ unsigned long bytes = PAGE_SIZE << order;
+ int alloc_count;
+ int free_count;
+
+ if (!address) {
+ DRM_MEM_ERROR(area, "Attempt to free address 0\n");
+ } else {
+ contigfree((void *) address, bytes, DRM(M_DRM));
+ }
+
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ free_count = ++DRM(mem_stats)[area].free_count;
+ alloc_count = DRM(mem_stats)[area].succeed_count;
+ DRM(mem_stats)[area].bytes_freed += bytes;
+ DRM(ram_used) -= bytes;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ if (free_count > alloc_count) {
+ DRM_MEM_ERROR(area,
+ "Excess frees: %d frees, %d allocs\n",
+ free_count, alloc_count);
+ }
+}
+
+void *DRM(ioremap)(unsigned long offset, unsigned long size)
+{
+ void *pt;
+
+ if (!size) {
+ DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
+ "Mapping 0 bytes at 0x%08lx\n", offset);
+ return NULL;
+ }
+
+ if (!(pt = pmap_mapdev(offset, size))) {
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ return NULL;
+ }
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
+ DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ return pt;
+}
+
+void DRM(ioremapfree)(void *pt, unsigned long size)
+{
+ int alloc_count;
+ int free_count;
+
+ if (!pt)
+ DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
+ "Attempt to free NULL pointer\n");
+ else
+ pmap_unmapdev((vm_offset_t) pt, size);
+
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_freed += size;
+ free_count = ++DRM(mem_stats)[DRM_MEM_MAPPINGS].free_count;
+ alloc_count = DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ if (free_count > alloc_count) {
+ DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
+ "Excess frees: %d frees, %d allocs\n",
+ free_count, alloc_count);
+ }
+}
+
+#if __REALLY_HAVE_AGP
+agp_memory *DRM(alloc_agp)(int pages, u32 type)
+{
+ agp_memory *handle;
+
+ if (!pages) {
+ DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n");
+ return NULL;
+ }
+
+ if ((handle = DRM(agp_allocate_memory)(pages, type))) {
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ ++DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
+ DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_allocated
+ += pages << PAGE_SHIFT;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ return handle;
+ }
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ ++DRM(mem_stats)[DRM_MEM_TOTALAGP].fail_count;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ return NULL;
+}
+
+int DRM(free_agp)(agp_memory *handle, int pages)
+{
+ int alloc_count;
+ int free_count;
+
+ if (!handle) {
+ DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
+ "Attempt to free NULL AGP handle\n");
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ if (DRM(agp_free_memory)(handle)) {
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ free_count = ++DRM(mem_stats)[DRM_MEM_TOTALAGP].free_count;
+ alloc_count = DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
+ DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_freed
+ += pages << PAGE_SHIFT;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ if (free_count > alloc_count) {
+ DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
+ "Excess frees: %d frees, %d allocs\n",
+ free_count, alloc_count);
+ }
+ return 0;
+ }
+ DRM_OS_RETURN(EINVAL);
+}
+
+int DRM(bind_agp)(agp_memory *handle, unsigned int start)
+{
+ int retcode;
+ device_t dev = agp_find_device();
+ struct agp_memory_info info;
+
+ if (!dev)
+ return EINVAL;
+
+ if (!handle) {
+ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
+ "Attempt to bind NULL AGP handle\n");
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ if (!(retcode = DRM(agp_bind_memory)(handle, start))) {
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
+ agp_memory_info(dev, handle, &info);
+ DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_allocated
+ += info.ami_size;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ DRM_OS_RETURN(0);
+ }
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].fail_count;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ DRM_OS_RETURN(retcode);
+}
+
+int DRM(unbind_agp)(agp_memory *handle)
+{
+ int alloc_count;
+ int free_count;
+ int retcode = EINVAL;
+ device_t dev = agp_find_device();
+ struct agp_memory_info info;
+
+ if (!dev)
+ return EINVAL;
+
+ if (!handle) {
+ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
+ "Attempt to unbind NULL AGP handle\n");
+ DRM_OS_RETURN(retcode);
+ }
+
+ agp_memory_info(dev, handle, &info);
+
+ if ((retcode = DRM(agp_unbind_memory)(handle)))
+ DRM_OS_RETURN(retcode);
+
+ DRM_OS_SPINLOCK(&DRM(mem_lock));
+ free_count = ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].free_count;
+ alloc_count = DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
+ DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_freed
+ += info.ami_size;
+ DRM_OS_SPINUNLOCK(&DRM(mem_lock));
+ if (free_count > alloc_count) {
+ DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
+ "Excess frees: %d frees, %d allocs\n",
+ free_count, alloc_count);
+ }
+ DRM_OS_RETURN(retcode);
+}
+#endif
diff --git a/bsd/drm_os_freebsd.h b/bsd/drm_os_freebsd.h
new file mode 100644
index 00000000..72c5baf6
--- /dev/null
+++ b/bsd/drm_os_freebsd.h
@@ -0,0 +1,375 @@
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/stat.h>
+#include <sys/proc.h>
+#include <sys/lock.h>
+#include <sys/fcntl.h>
+#include <sys/uio.h>
+#include <sys/filio.h>
+#include <sys/sysctl.h>
+#include <sys/select.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <machine/pmap.h>
+#if __FreeBSD_version >= 500000
+#include <sys/selinfo.h>
+#endif
+#include <sys/bus.h>
+#if __FreeBSD_version >= 400005
+#include <sys/taskqueue.h>
+#endif
+#if __FreeBSD_version >= 500000
+#include <sys/mutex.h>
+#endif
+
+#if __FreeBSD_version >= 400006
+#define __REALLY_HAVE_AGP __HAVE_AGP
+#endif
+
+#define __REALLY_HAVE_MTRR 0
+#define __REALLY_HAVE_SG 0
+
+#if __REALLY_HAVE_AGP
+#include <pci/agpvar.h>
+#endif
+
+#define DRM_TIME_SLICE (hz/20) /* Time slice for GLXContexts */
+
+#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
+#define DRM_DEV_UID 0
+#define DRM_DEV_GID 0
+
+
+#if __FreeBSD_version >= 500000
+#define DRM_OS_SPINTYPE struct mtx
+#define DRM_OS_SPININIT(l,name) mtx_init(&l, name, MTX_DEF)
+#define DRM_OS_SPINLOCK(l) mtx_lock(l)
+#define DRM_OS_SPINUNLOCK(u) mtx_unlock(u);
+#define DRM_OS_LOCK lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curthread)
+#define DRM_OS_UNLOCK lockmgr(&dev->dev_lock, LK_RELEASE, 0, curthread)
+#define DRM_OS_CURPROC curthread
+#define DRM_OS_STRUCTPROC struct thread
+#define DRM_OS_CURRENTPID curthread->td_proc->p_pid
+#define DRM_OS_IOCTL dev_t kdev, u_long cmd, caddr_t data, int flags, struct thread *p
+#define DRM_OS_CHECKSUSER suser(p->td_proc)
+#else
+#define DRM_OS_CURPROC curproc
+#define DRM_OS_STRUCTPROC struct proc
+#define DRM_OS_SPINTYPE struct simplelock
+#define DRM_OS_SPININIT(l,name) simple_lock_init(&l)
+#define DRM_OS_SPINLOCK(l) simple_lock(l)
+#define DRM_OS_SPINUNLOCK(u) simple_unlock(u);
+#define DRM_OS_IOCTL dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p
+#define DRM_OS_LOCK lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc)
+#define DRM_OS_UNLOCK lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc)
+#define DRM_OS_CURRENTPID curproc->p_pid
+#define DRM_OS_CHECKSUSER suser(p)
+#endif
+
+#define DRM_OS_TASKQUEUE_ARGS void *dev, int pending
+#define DRM_OS_IRQ_ARGS void *device
+#define DRM_OS_DEVICE drm_device_t *dev = kdev->si_drv1
+#define DRM_OS_MALLOC(size) malloc( size, DRM(M_DRM), M_NOWAIT )
+#define DRM_OS_FREE(pt) free( pt, DRM(M_DRM) )
+#define DRM_OS_VTOPHYS(addr) vtophys(addr)
+
+#define DRM_OS_PRIV \
+ drm_file_t *priv = (drm_file_t *) DRM(find_file_by_proc)(dev, p); \
+ if (!priv) { \
+ DRM_DEBUG("can't find authenticator\n"); \
+ return EINVAL; \
+ }
+
+#define DRM_OS_DELAY( udelay ) \
+do { \
+ struct timeval tv1, tv2; \
+ microtime(&tv1); \
+ do { \
+ microtime(&tv2); \
+ } \
+ while (((tv2.tv_sec-tv1.tv_sec)*1000000 + tv2.tv_usec - tv1.tv_usec) < udelay ); \
+} while (0)
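DRM_OS_DELAY() busy-waits by polling microtime() until the requested number of microseconds has elapsed. A rough user-space equivalent, with gettimeofday(2) standing in for microtime(); purely illustrative, since spinning like this is only appropriate inside the driver:

#include <stdio.h>
#include <sys/time.h>

static void delay_usec(long usec)
{
	struct timeval tv1, tv2;

	gettimeofday(&tv1, NULL);
	do {
		gettimeofday(&tv2, NULL);
	} while ((tv2.tv_sec - tv1.tv_sec) * 1000000L
		 + (tv2.tv_usec - tv1.tv_usec) < usec);
}

int main(void)
{
	delay_usec(1000);		/* spin for roughly one millisecond */
	printf("done\n");
	return 0;
}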
+
+#define DRM_OS_RETURN(v) return v;
+
+
+#define DRM_OS_KRNTOUSR(arg1, arg2, arg3) \
+ *arg1 = arg2
+#define DRM_OS_KRNFROMUSR(arg1, arg2, arg3) \
+ arg1 = *arg2
+#define DRM_OS_COPYTOUSR(arg1, arg2, arg3) \
+ copyout(arg2, arg1, arg3)
+#define DRM_OS_COPYFROMUSR(arg1, arg2, arg3) \
+ copyin(arg2, arg1, arg3)
+
+#define DRM_OS_READMEMORYBARRIER \
+do { \
+ int xchangeDummy; \
+ DRM_DEBUG("%s\n", __FUNCTION__); \
+ __asm__ volatile(" push %%eax ; xchg %%eax, %0 ; pop %%eax" : : "m" (xchangeDummy)); \
+ __asm__ volatile(" push %%eax ; push %%ebx ; push %%ecx ; push %%edx ;" \
+ " movl $0,%%eax ; cpuid ; pop %%edx ; pop %%ecx ; pop %%ebx ;" \
+ " pop %%eax" : /* no outputs */ : /* no inputs */ ); \
+} while (0);
+
+#define DRM_OS_WRITEMEMORYBARRIER DRM_OS_READMEMORYBARRIER
+
+#define DRM_OS_WAKEUP(w) wakeup(w)
+#define DRM_OS_WAKEUP_INT(w) wakeup(w)
+
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+#define malloctype DRM(M_DRM)
+/* The macros conflicted in the MALLOC_DEFINE */
+MALLOC_DECLARE(malloctype);
+#undef malloctype
+
+typedef struct drm_chipinfo
+{
+ int vendor;
+ int device;
+ int supported;
+ char *name;
+} drm_chipinfo_t;
+
+typedef unsigned long atomic_t;
+typedef u_int32_t cycles_t;
+typedef u_int32_t spinlock_t;
+typedef u_int32_t u32;
+typedef u_int16_t u16;
+typedef u_int8_t u8;
+#define atomic_set(p, v) (*(p) = (v))
+#define atomic_read(p) (*(p))
+#define atomic_inc(p) atomic_add_long(p, 1)
+#define atomic_dec(p) atomic_subtract_long(p, 1)
+#define atomic_add(n, p) atomic_add_long(p, n)
+#define atomic_sub(n, p) atomic_subtract_long(p, n)
+
+/* Fake this */
+static __inline unsigned int
+test_and_set_bit(int b, volatile unsigned long *p)
+{
+ int s = splhigh();
+ unsigned int m = 1<<b;
+ unsigned int r = *p & m;
+ *p |= m;
+ splx(s);
+ return r;
+}
+
+static __inline void
+clear_bit(int b, volatile unsigned long *p)
+{
+ atomic_clear_long(p + (b >> 5), 1 << (b & 0x1f));
+}
+
+static __inline void
+set_bit(int b, volatile unsigned long *p)
+{
+ atomic_set_long(p + (b >> 5), 1 << (b & 0x1f));
+}
+
+static __inline int
+test_bit(int b, volatile unsigned long *p)
+{
+ return p[b >> 5] & (1 << (b & 0x1f));
+}
+
+static __inline int
+find_first_zero_bit(volatile unsigned long *p, int max)
+{
+ int b;
+
+ for (b = 0; b < max; b += 32) {
+ if (p[b >> 5] != ~0) {
+ for (;;) {
+ if ((p[b >> 5] & (1 << (b & 0x1f))) == 0)
+ return b;
+ b++;
+ }
+ }
+ }
+ return max;
+}
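These shims emulate the Linux bit operations with 32-bit word indexing: bit b lives in word b >> 5 under mask 1 << (b & 0x1f), which is how the context bitmap hands out context IDs. A stand-alone sketch of that layout, using a simple linear scan instead of the word-skipping find_first_zero_bit() above:

#include <stdio.h>

static void bm_set(unsigned long *p, int b)   { p[b >> 5] |=  (1UL << (b & 0x1f)); }
static void bm_clear(unsigned long *p, int b) { p[b >> 5] &= ~(1UL << (b & 0x1f)); }
static int  bm_test(const unsigned long *p, int b)
{
	return (p[b >> 5] >> (b & 0x1f)) & 1;
}

static int bm_find_first_zero(const unsigned long *p, int max)
{
	for (int b = 0; b < max; b++)
		if (!bm_test(p, b))
			return b;
	return max;
}

int main(void)
{
	unsigned long bitmap[4] = { 0 };	/* room for 128 context slots */

	bm_set(bitmap, 0);			/* e.g. the kernel context */
	bm_set(bitmap, 1);
	printf("first free context: %d\n", bm_find_first_zero(bitmap, 128));
	bm_clear(bitmap, 1);
	printf("first free context: %d\n", bm_find_first_zero(bitmap, 128));
	return 0;
}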
+
+#define spldrm() spltty()
+
+#define memset(p, v, s) bzero(p, s)
+
+/*
+ * Fake out the module macros for versions of FreeBSD where they don't
+ * exist.
+ */
+#if (__FreeBSD_version < 500002 && __FreeBSD_version > 500000) || __FreeBSD_version < 420000
+/* FIXME: again, what's the exact date? */
+#define MODULE_VERSION(a,b) struct __hack
+#define MODULE_DEPEND(a,b,c,d,e) struct __hack
+
+#endif
+
+#define __drm_dummy_lock(lock) (*(__volatile__ unsigned int *)lock)
+#define _DRM_CAS(lock,old,new,__ret) \
+ do { \
+ int __dummy; /* Can't mark eax as clobbered */ \
+ __asm__ __volatile__( \
+ "lock ; cmpxchg %4,%1\n\t" \
+ "setnz %0" \
+ : "=d" (__ret), \
+ "=m" (__drm_dummy_lock(lock)), \
+ "=a" (__dummy) \
+ : "2" (old), \
+ "r" (new)); \
+ } while (0)
+
+/* Redefinitions to make templating easy */
+#define wait_queue_head_t long
+#define agp_memory void
+#define jiffies ticks
+
+ /* Macros to make printf easier */
+#define DRM_ERROR(fmt, arg...) \
+ printf("error: " "[" DRM_NAME ":" __FUNCTION__ "] *ERROR* " fmt , ##arg)
+#define DRM_MEM_ERROR(area, fmt, arg...) \
+ printf("error: " "[" DRM_NAME ":" __FUNCTION__ ":%s] *ERROR* " fmt , \
+ DRM(mem_stats)[area].name , ##arg)
+#define DRM_INFO(fmt, arg...) printf("info: " "[" DRM_NAME "] " fmt , ##arg)
+
+#if DRM_DEBUG_CODE
+#define DRM_DEBUG(fmt, arg...) \
+ do { \
+ if (DRM(flags) & DRM_FLAG_DEBUG) \
+ printf("[" DRM_NAME ":" __FUNCTION__ "] " fmt , \
+ ##arg); \
+ } while (0)
+#else
+#define DRM_DEBUG(fmt, arg...) do { } while (0)
+#endif
+
+#define DRM_PROC_LIMIT (PAGE_SIZE-80)
+
+#if (__FreeBSD_version >= 500000) || ((__FreeBSD_version < 500000) && (__FreeBSD_version >= 410002))
+#define DRM_SYSCTL_HANDLER_ARGS (SYSCTL_HANDLER_ARGS)
+#else
+#define DRM_SYSCTL_HANDLER_ARGS SYSCTL_HANDLER_ARGS
+#endif
+
+#define DRM_SYSCTL_PRINT(fmt, arg...) \
+ snprintf(buf, sizeof(buf), fmt, ##arg); \
+ error = SYSCTL_OUT(req, buf, strlen(buf)); \
+ if (error) return error;
+
+#define DRM_SYSCTL_PRINT_RET(ret, fmt, arg...) \
+ snprintf(buf, sizeof(buf), fmt, ##arg); \
+ error = SYSCTL_OUT(req, buf, strlen(buf)); \
+ if (error) { ret; return error; }
+
+
+#define DRM_FIND_MAP(dest, o) \
+ do { \
+ drm_map_list_entry_t *listentry; \
+ TAILQ_FOREACH(listentry, dev->maplist, link) { \
+ if ( listentry->map->offset == o ) { \
+ dest = listentry->map; \
+ break; \
+ } \
+ } \
+ } while (0)
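DRM_FIND_MAP() is a linear walk of the TAILQ of map entries keyed by offset, which stays cheap because a device only carries a handful of maps. The same lookup in plain user-space C with <sys/queue.h>, using invented offsets:

#include <stdio.h>
#include <sys/queue.h>

struct map {
	unsigned long offset, size;
	TAILQ_ENTRY(map) link;
};
TAILQ_HEAD(maplist, map);

static struct map *find_map(struct maplist *list, unsigned long offset)
{
	struct map *m;

	TAILQ_FOREACH(m, list, link)
		if (m->offset == offset)
			return m;
	return NULL;
}

int main(void)
{
	struct maplist maps = TAILQ_HEAD_INITIALIZER(maps);
	struct map fb   = { 0xd8000000UL, 0x01000000UL };	/* made-up ranges */
	struct map mmio = { 0xfe000000UL, 0x00004000UL };

	TAILQ_INSERT_TAIL(&maps, &fb, link);
	TAILQ_INSERT_TAIL(&maps, &mmio, link);

	struct map *m = find_map(&maps, 0xfe000000UL);
	printf("found map of size 0x%lx\n", m ? m->size : 0UL);
	return 0;
}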
+
+
+/* Internal functions */
+
+/* drm_drv.h */
+extern d_ioctl_t DRM(ioctl);
+extern d_ioctl_t DRM(lock);
+extern d_ioctl_t DRM(unlock);
+extern d_open_t DRM(open);
+extern d_close_t DRM(close);
+extern d_read_t DRM(read);
+extern d_write_t DRM(write);
+extern d_poll_t DRM(poll);
+extern d_mmap_t DRM(mmap);
+extern int DRM(open_helper)(dev_t kdev, int flags, int fmt,
+ DRM_OS_STRUCTPROC *p, drm_device_t *dev);
+extern drm_file_t *DRM(find_file_by_proc)(drm_device_t *dev,
+ DRM_OS_STRUCTPROC *p);
+
+/* Misc. IOCTL support (drm_ioctl.h) */
+extern d_ioctl_t DRM(irq_busid);
+extern d_ioctl_t DRM(getunique);
+extern d_ioctl_t DRM(setunique);
+extern d_ioctl_t DRM(getmap);
+extern d_ioctl_t DRM(getclient);
+extern d_ioctl_t DRM(getstats);
+
+/* Context IOCTL support (drm_context.h) */
+extern d_ioctl_t DRM(resctx);
+extern d_ioctl_t DRM(addctx);
+extern d_ioctl_t DRM(modctx);
+extern d_ioctl_t DRM(getctx);
+extern d_ioctl_t DRM(switchctx);
+extern d_ioctl_t DRM(newctx);
+extern d_ioctl_t DRM(rmctx);
+extern d_ioctl_t DRM(setsareactx);
+extern d_ioctl_t DRM(getsareactx);
+
+/* Drawable IOCTL support (drm_drawable.h) */
+extern d_ioctl_t DRM(adddraw);
+extern d_ioctl_t DRM(rmdraw);
+
+/* Authentication IOCTL support (drm_auth.h) */
+extern d_ioctl_t DRM(getmagic);
+extern d_ioctl_t DRM(authmagic);
+
+/* Locking IOCTL support (drm_lock.h) */
+extern d_ioctl_t DRM(block);
+extern d_ioctl_t DRM(unblock);
+extern d_ioctl_t DRM(finish);
+
+/* Buffer management support (drm_bufs.h) */
+extern d_ioctl_t DRM(addmap);
+extern d_ioctl_t DRM(rmmap);
+#if __HAVE_DMA
+extern d_ioctl_t DRM(addbufs_agp);
+extern d_ioctl_t DRM(addbufs_pci);
+extern d_ioctl_t DRM(addbufs_sg);
+extern d_ioctl_t DRM(addbufs);
+extern d_ioctl_t DRM(infobufs);
+extern d_ioctl_t DRM(markbufs);
+extern d_ioctl_t DRM(freebufs);
+extern d_ioctl_t DRM(mapbufs);
+#endif
+
+/* Memory management support (drm_memory.h) */
+extern int DRM(mem_info) DRM_SYSCTL_HANDLER_ARGS;
+
+/* DMA support (drm_dma.h) */
+#if __HAVE_DMA_IRQ
+extern d_ioctl_t DRM(control);
+#endif
+
+/* AGP/GART support (drm_agpsupport.h) */
+#if __REALLY_HAVE_AGP
+extern d_ioctl_t DRM(agp_acquire);
+extern d_ioctl_t DRM(agp_release);
+extern d_ioctl_t DRM(agp_enable);
+extern d_ioctl_t DRM(agp_info);
+extern d_ioctl_t DRM(agp_alloc);
+extern d_ioctl_t DRM(agp_free);
+extern d_ioctl_t DRM(agp_unbind);
+extern d_ioctl_t DRM(agp_bind);
+#endif
+
+/* Scatter Gather Support (drm_scatter.h) */
+#if __HAVE_SG
+extern d_ioctl_t DRM(sg_alloc);
+extern d_ioctl_t DRM(sg_free);
+#endif
+
+/* SysCtl Support (drm_sysctl.h) */
+extern int DRM(sysctl_init)(drm_device_t *dev);
+extern int DRM(sysctl_cleanup)(drm_device_t *dev);
diff --git a/bsd/drm_scatter.h b/bsd/drm_scatter.h
new file mode 100644
index 00000000..a6b8275f
--- /dev/null
+++ b/bsd/drm_scatter.h
@@ -0,0 +1,237 @@
+/* drm_scatter.h -- IOCTLs to manage scatter/gather memory -*- linux-c -*-
+ * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#define __NO_VERSION__
+#include <linux/config.h>
+#include <linux/vmalloc.h>
+#include "drmP.h"
+
+#define DEBUG_SCATTER 0
+
+void DRM(sg_cleanup)( drm_sg_mem_t *entry )
+{
+ struct page *page;
+ int i;
+
+ for ( i = 0 ; i < entry->pages ; i++ ) {
+ page = entry->pagelist[i];
+ if ( page )
+ ClearPageReserved( page );
+ }
+
+ vfree( entry->virtual );
+
+ DRM(free)( entry->busaddr,
+ entry->pages * sizeof(*entry->busaddr),
+ DRM_MEM_PAGES );
+ DRM(free)( entry->pagelist,
+ entry->pages * sizeof(*entry->pagelist),
+ DRM_MEM_PAGES );
+ DRM(free)( entry,
+ sizeof(*entry),
+ DRM_MEM_SGLISTS );
+}
+
+int DRM(sg_alloc)( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg )
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_scatter_gather_t request;
+ drm_sg_mem_t *entry;
+ unsigned long pages, i, j;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+ if ( dev->sg )
+ return -EINVAL;
+
+ if ( copy_from_user( &request,
+ (drm_scatter_gather_t *)arg,
+ sizeof(request) ) )
+ return -EFAULT;
+
+ entry = DRM(alloc)( sizeof(*entry), DRM_MEM_SGLISTS );
+ if ( !entry )
+ return -ENOMEM;
+
+ memset( entry, 0, sizeof(*entry) );
+
+ pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
+ DRM_DEBUG( "sg size=%ld pages=%ld\n", request.size, pages );
+
+ entry->pages = pages;
+ entry->pagelist = DRM(alloc)( pages * sizeof(*entry->pagelist),
+ DRM_MEM_PAGES );
+ if ( !entry->pagelist ) {
+ DRM(free)( entry, sizeof(*entry), DRM_MEM_SGLISTS );
+ return -ENOMEM;
+ }
+
+ entry->busaddr = DRM(alloc)( pages * sizeof(*entry->busaddr),
+ DRM_MEM_PAGES );
+ if ( !entry->busaddr ) {
+ DRM(free)( entry->pagelist,
+ entry->pages * sizeof(*entry->pagelist),
+ DRM_MEM_PAGES );
+ DRM(free)( entry,
+ sizeof(*entry),
+ DRM_MEM_SGLISTS );
+ return -ENOMEM;
+ }
+ memset( (void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr) );
+
+ entry->virtual = vmalloc_32( pages << PAGE_SHIFT );
+ if ( !entry->virtual ) {
+ DRM(free)( entry->busaddr,
+ entry->pages * sizeof(*entry->busaddr),
+ DRM_MEM_PAGES );
+ DRM(free)( entry->pagelist,
+ entry->pages * sizeof(*entry->pagelist),
+ DRM_MEM_PAGES );
+ DRM(free)( entry,
+ sizeof(*entry),
+ DRM_MEM_SGLISTS );
+ return -ENOMEM;
+ }
+
+ /* This also forces the mapping of COW pages, so our page list
+ * will be valid. Please don't remove it...
+ */
+ memset( entry->virtual, 0, pages << PAGE_SHIFT );
+
+ entry->handle = (unsigned long)entry->virtual;
+
+ DRM_DEBUG( "sg alloc handle = %08lx\n", entry->handle );
+ DRM_DEBUG( "sg alloc virtual = %p\n", entry->virtual );
+
+ for ( i = entry->handle, j = 0 ; j < pages ; i += PAGE_SIZE, j++ ) {
+ pgd = pgd_offset_k( i );
+ if ( !pgd_present( *pgd ) )
+ goto failed;
+
+ pmd = pmd_offset( pgd, i );
+ if ( !pmd_present( *pmd ) )
+ goto failed;
+
+ pte = pte_offset( pmd, i );
+ if ( !pte_present( *pte ) )
+ goto failed;
+
+ entry->pagelist[j] = pte_page( *pte );
+
+ SetPageReserved( entry->pagelist[j] );
+ }
+
+ request.handle = entry->handle;
+
+ if ( copy_to_user( (drm_scatter_gather_t *)arg,
+ &request,
+ sizeof(request) ) ) {
+ DRM(sg_cleanup)( entry );
+ return -EFAULT;
+ }
+
+ dev->sg = entry;
+
+#if DEBUG_SCATTER
+ /* Verify that each page points to its virtual address, and vice
+ * versa.
+ */
+ {
+ int error = 0;
+
+ for ( i = 0 ; i < pages ; i++ ) {
+ unsigned long *tmp;
+
+ tmp = page_address( entry->pagelist[i] );
+ for ( j = 0 ;
+ j < PAGE_SIZE / sizeof(unsigned long) ;
+ j++, tmp++ ) {
+ *tmp = 0xcafebabe;
+ }
+ tmp = (unsigned long *)((u8 *)entry->virtual +
+ (PAGE_SIZE * i));
+ for( j = 0 ;
+ j < PAGE_SIZE / sizeof(unsigned long) ;
+ j++, tmp++ ) {
+ if ( *tmp != 0xcafebabe && error == 0 ) {
+ error = 1;
+ DRM_ERROR( "Scatter allocation error, "
+ "pagelist does not match "
+ "virtual mapping\n" );
+ }
+ }
+ tmp = page_address( entry->pagelist[i] );
+ for(j = 0 ;
+ j < PAGE_SIZE / sizeof(unsigned long) ;
+ j++, tmp++) {
+ *tmp = 0;
+ }
+ }
+ if (error == 0)
+ DRM_ERROR( "Scatter allocation matches pagelist\n" );
+ }
+#endif
+
+ return 0;
+
+ failed:
+ DRM(sg_cleanup)( entry );
+ return -ENOMEM;
+}
+
+int DRM(sg_free)( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg )
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_scatter_gather_t request;
+ drm_sg_mem_t *entry;
+
+ if ( copy_from_user( &request,
+ (drm_scatter_gather_t *)arg,
+ sizeof(request) ) )
+ return -EFAULT;
+
+ entry = dev->sg;
+ dev->sg = NULL;
+
+ if ( !entry || entry->handle != request.handle )
+ return -EINVAL;
+
+ DRM_DEBUG( "sg free virtual = %p\n", entry->virtual );
+
+ DRM(sg_cleanup)( entry );
+
+ return 0;
+}
diff --git a/bsd/drm_sysctl.h b/bsd/drm_sysctl.h
new file mode 100644
index 00000000..02e4b28d
--- /dev/null
+++ b/bsd/drm_sysctl.h
@@ -0,0 +1,523 @@
+SYSCTL_NODE(_hw, OID_AUTO, dri, CTLFLAG_RW, 0, "DRI Graphics");
+
+static int DRM(name_info)DRM_SYSCTL_HANDLER_ARGS;
+static int DRM(vm_info)DRM_SYSCTL_HANDLER_ARGS;
+static int DRM(clients_info)DRM_SYSCTL_HANDLER_ARGS;
+static int DRM(queues_info)DRM_SYSCTL_HANDLER_ARGS;
+static int DRM(bufs_info)DRM_SYSCTL_HANDLER_ARGS;
+#if DRM_DEBUG_CODExx
+static int DRM(vma_info)DRM_SYSCTL_HANDLER_ARGS;
+#endif
+#if DRM_DMA_HISTOGRAM
+static int DRM(histo_info)DRM_SYSCTL_HANDLER_ARGS;
+#endif
+
+struct DRM(sysctl_list) {
+ const char *name;
+ int (*f) DRM_SYSCTL_HANDLER_ARGS;
+} DRM(sysctl_list)[] = {
+ { "name", DRM(name_info) },
+ { "mem", DRM(mem_info) },
+ { "vm", DRM(vm_info) },
+ { "clients", DRM(clients_info) },
+ { "queues", DRM(queues_info) },
+ { "bufs", DRM(bufs_info) },
+#if DRM_DEBUG_CODExx
+ { "vma", DRM(vma_info) },
+#endif
+#if DRM_DMA_HISTOGRAM
+	{ "histo",    DRM(histo_info) },
+#endif
+};
+#define DRM_SYSCTL_ENTRIES (sizeof(DRM(sysctl_list))/sizeof(DRM(sysctl_list)[0]))
+
+struct drm_sysctl_info {
+ struct sysctl_oid oids[DRM_SYSCTL_ENTRIES + 1];
+ struct sysctl_oid_list list;
+ char name[2];
+};
+
+int DRM(sysctl_init)(drm_device_t *dev)
+{
+ struct drm_sysctl_info *info;
+ struct sysctl_oid *oid;
+ struct sysctl_oid *top;
+ int i;
+
+	/* Find the next free slot under hw.dri */
+ i = 0;
+ SLIST_FOREACH(oid, &sysctl__hw_dri_children, oid_link) {
+ if (i <= oid->oid_arg2)
+ i = oid->oid_arg2 + 1;
+ }
+
+ info = DRM(alloc)(sizeof *info, DRM_MEM_DRIVER);
+ dev->sysctl = info;
+
+	/* Construct the node under hw.dri */
+ info->name[0] = '0' + i;
+ info->name[1] = 0;
+ oid = &info->oids[DRM_SYSCTL_ENTRIES];
+ bzero(oid, sizeof(*oid));
+ oid->oid_parent = &sysctl__hw_dri_children;
+ oid->oid_number = OID_AUTO;
+ oid->oid_kind = CTLTYPE_NODE | CTLFLAG_RW;
+ oid->oid_arg1 = &info->list;
+ oid->oid_arg2 = i;
+ oid->oid_name = info->name;
+ oid->oid_handler = 0;
+ oid->oid_fmt = "N";
+ SLIST_INIT(&info->list);
+ sysctl_register_oid(oid);
+ top = oid;
+
+ for (i = 0; i < DRM_SYSCTL_ENTRIES; i++) {
+ oid = &info->oids[i];
+ bzero(oid, sizeof(*oid));
+ oid->oid_parent = top->oid_arg1;
+ oid->oid_number = OID_AUTO;
+ oid->oid_kind = CTLTYPE_INT | CTLFLAG_RD;
+ oid->oid_arg1 = dev;
+ oid->oid_arg2 = 0;
+ oid->oid_name = DRM(sysctl_list)[i].name;
+		oid->oid_handler = DRM(sysctl_list)[i].f;
+ oid->oid_fmt = "A";
+ sysctl_register_oid(oid);
+ }
+
+ return 0;
+}
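Once DRM(sysctl_init) has registered the per-device node, the handlers above appear under hw.dri.<unit> and can be read like any other sysctl. A small FreeBSD consumer sketch; "hw.dri.0.name" assumes the first device and the "name" entry from the table above:

#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int main(void)
{
	char buf[256];
	size_t len = sizeof(buf) - 1;

	if (sysctlbyname("hw.dri.0.name", buf, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return 1;
	}
	buf[len] = '\0';
	printf("hw.dri.0.name: %s\n", buf);
	return 0;
}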
+
+int DRM(sysctl_cleanup)(drm_device_t *dev)
+{
+ int i;
+
+ DRM_DEBUG("dev->sysctl=%p\n", dev->sysctl);
+ for (i = 0; i < DRM_SYSCTL_ENTRIES + 1; i++)
+ sysctl_unregister_oid(&dev->sysctl->oids[i]);
+
+ DRM(free)(dev->sysctl, sizeof *dev->sysctl, DRM_MEM_DRIVER);
+ dev->sysctl = NULL;
+
+ return 0;
+}
+
+static int DRM(name_info)DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ char buf[128];
+ int error;
+
+ if (dev->unique) {
+ DRM_SYSCTL_PRINT("%s 0x%x %s\n",
+ dev->name, dev2udev(dev->devnode), dev->unique);
+ } else {
+ DRM_SYSCTL_PRINT("%s 0x%x\n", dev->name, dev2udev(dev->devnode));
+ }
+
+ SYSCTL_OUT(req, "", 1);
+
+ return 0;
+}
+
+static int DRM(_vm_info)DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ drm_map_t *map;
+ drm_map_list_entry_t *listentry;
+ const char *types[] = { "FB", "REG", "SHM" };
+ const char *type;
+ int i=0;
+ char buf[128];
+ int error;
+
+ DRM_SYSCTL_PRINT("slot offset size type flags "
+ "address mtrr\n\n");
+ error = SYSCTL_OUT(req, buf, strlen(buf));
+ if (error) return error;
+
+ if (dev->maplist != NULL) {
+ TAILQ_FOREACH(listentry, dev->maplist, link) {
+ map = listentry->map;
+ if (map->type < 0 || map->type > 2) type = "??";
+ else type = types[map->type];
+ DRM_SYSCTL_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
+ i,
+ map->offset,
+ map->size,
+ type,
+ map->flags,
+ (unsigned long)map->handle);
+ if (map->mtrr < 0) {
+ DRM_SYSCTL_PRINT("none\n");
+ } else {
+ DRM_SYSCTL_PRINT("%4d\n", map->mtrr);
+ }
+ i++;
+ }
+ }
+ SYSCTL_OUT(req, "", 1);
+
+ return 0;
+}
+
+static int DRM(vm_info)DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ int ret;
+
+ DRM_OS_LOCK;
+ ret = DRM(_vm_info)(oidp, arg1, arg2, req);
+ DRM_OS_UNLOCK;
+
+ return ret;
+}
+
+
+static int DRM(_queues_info)DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ int i;
+ drm_queue_t *q;
+ char buf[128];
+ int error;
+
+ DRM_SYSCTL_PRINT(" ctx/flags use fin"
+ " blk/rw/rwf wait flushed queued"
+ " locks\n\n");
+ for (i = 0; i < dev->queue_count; i++) {
+ q = dev->queuelist[i];
+ atomic_inc(&q->use_count);
+ DRM_SYSCTL_PRINT_RET(atomic_dec(&q->use_count),
+ "%5d/0x%03x %5ld %5ld"
+ " %5ld/%c%c/%c%c%c %5d %10ld %10ld %10ld\n",
+ i,
+ q->flags,
+ atomic_read(&q->use_count),
+ atomic_read(&q->finalization),
+ atomic_read(&q->block_count),
+ atomic_read(&q->block_read) ? 'r' : '-',
+ atomic_read(&q->block_write) ? 'w' : '-',
+ q->read_queue ? 'r':'-',
+ q->write_queue ? 'w':'-',
+ q->flush_queue ? 'f':'-',
+ DRM_BUFCOUNT(&q->waitlist),
+ atomic_read(&q->total_flushed),
+ atomic_read(&q->total_queued),
+ atomic_read(&q->total_locks));
+ atomic_dec(&q->use_count);
+ }
+
+ SYSCTL_OUT(req, "", 1);
+ return 0;
+}
+
+static int DRM(queues_info) DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ int ret;
+
+ DRM_OS_LOCK;
+ ret = DRM(_queues_info)(oidp, arg1, arg2, req);
+ DRM_OS_UNLOCK;
+ return ret;
+}
+
+/* drm_bufs_info is called whenever a process reads
+ hw.dri.0.bufs. */
+
+static int DRM(_bufs_info) DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ drm_device_dma_t *dma = dev->dma;
+ int i;
+ char buf[128];
+ int error;
+
+ if (!dma) return 0;
+ DRM_SYSCTL_PRINT(" o size count free segs pages kB\n\n");
+ for (i = 0; i <= DRM_MAX_ORDER; i++) {
+ if (dma->bufs[i].buf_count)
+ DRM_SYSCTL_PRINT("%2d %8d %5d %5ld %5d %5d %5d\n",
+ i,
+ dma->bufs[i].buf_size,
+ dma->bufs[i].buf_count,
+ atomic_read(&dma->bufs[i]
+ .freelist.count),
+ dma->bufs[i].seg_count,
+ dma->bufs[i].seg_count
+ *(1 << dma->bufs[i].page_order),
+ (dma->bufs[i].seg_count
+ * (1 << dma->bufs[i].page_order))
+ * PAGE_SIZE / 1024);
+ }
+ DRM_SYSCTL_PRINT("\n");
+ for (i = 0; i < dma->buf_count; i++) {
+ if (i && !(i%32)) DRM_SYSCTL_PRINT("\n");
+ DRM_SYSCTL_PRINT(" %d", dma->buflist[i]->list);
+ }
+ DRM_SYSCTL_PRINT("\n");
+
+ SYSCTL_OUT(req, "", 1);
+ return 0;
+}
+
+static int DRM(bufs_info) DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ int ret;
+
+ DRM_OS_LOCK;
+ ret = DRM(_bufs_info)(oidp, arg1, arg2, req);
+ DRM_OS_UNLOCK;
+ return ret;
+}
+
+
+static int DRM(_clients_info) DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ drm_file_t *priv;
+ char buf[128];
+ int error;
+
+ DRM_SYSCTL_PRINT("a dev pid uid magic ioctls\n\n");
+ TAILQ_FOREACH(priv, &dev->files, link) {
+ DRM_SYSCTL_PRINT("%c %3d %5d %5d %10u %10lu\n",
+ priv->authenticated ? 'y' : 'n',
+ priv->minor,
+ priv->pid,
+ priv->uid,
+ priv->magic,
+ priv->ioctl_count);
+ }
+
+ SYSCTL_OUT(req, "", 1);
+ return 0;
+}
+
+static int DRM(clients_info)DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ int ret;
+
+ DRM_OS_LOCK;
+ ret = DRM(_clients_info)(oidp, arg1, arg2, req);
+ DRM_OS_UNLOCK;
+ return ret;
+}
+
+#if DRM_DEBUG_CODExx
+
+static int DRM(_vma_info)DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ drm_vma_entry_t *pt;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long i;
+ struct vm_area_struct *vma;
+ unsigned long address;
+#if defined(__i386__)
+ unsigned int pgprot;
+#endif
+ char buf[128];
+ int error;
+
+ DRM_SYSCTL_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
+ atomic_read(&dev->vma_count),
+ high_memory, virt_to_phys(high_memory));
+ for (pt = dev->vmalist; pt; pt = pt->next) {
+ if (!(vma = pt->vma)) continue;
+ DRM_SYSCTL_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx",
+ pt->pid,
+ vma->vm_start,
+ vma->vm_end,
+ vma->vm_flags & VM_READ ? 'r' : '-',
+ vma->vm_flags & VM_WRITE ? 'w' : '-',
+ vma->vm_flags & VM_EXEC ? 'x' : '-',
+ vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
+ vma->vm_flags & VM_LOCKED ? 'l' : '-',
+ vma->vm_flags & VM_IO ? 'i' : '-',
+ vma->vm_offset );
+#if defined(__i386__)
+ pgprot = pgprot_val(vma->vm_page_prot);
+ DRM_SYSCTL_PRINT(" %c%c%c%c%c%c%c%c%c",
+ pgprot & _PAGE_PRESENT ? 'p' : '-',
+ pgprot & _PAGE_RW ? 'w' : 'r',
+ pgprot & _PAGE_USER ? 'u' : 's',
+ pgprot & _PAGE_PWT ? 't' : 'b',
+ pgprot & _PAGE_PCD ? 'u' : 'c',
+ pgprot & _PAGE_ACCESSED ? 'a' : '-',
+ pgprot & _PAGE_DIRTY ? 'd' : '-',
+ pgprot & _PAGE_4M ? 'm' : 'k',
+ pgprot & _PAGE_GLOBAL ? 'g' : 'l' );
+#endif
+ DRM_SYSCTL_PRINT("\n");
+ for (i = vma->vm_start; i < vma->vm_end; i += PAGE_SIZE) {
+ pgd = pgd_offset(vma->vm_mm, i);
+ pmd = pmd_offset(pgd, i);
+ pte = pte_offset(pmd, i);
+ if (pte_present(*pte)) {
+ address = __pa(pte_page(*pte))
+ + (i & (PAGE_SIZE-1));
+ DRM_SYSCTL_PRINT(" 0x%08lx -> 0x%08lx"
+ " %c%c%c%c%c\n",
+ i,
+ address,
+ pte_read(*pte) ? 'r' : '-',
+ pte_write(*pte) ? 'w' : '-',
+ pte_exec(*pte) ? 'x' : '-',
+ pte_dirty(*pte) ? 'd' : '-',
+ pte_young(*pte) ? 'a' : '-' );
+ } else {
+ DRM_SYSCTL_PRINT(" 0x%08lx\n", i);
+ }
+ }
+ }
+
+ SYSCTL_OUT(req, "", 1);
+ return 0;
+}
+
+static int DRM(vma_info)DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ int ret;
+
+ DRM_OS_LOCK;
+ ret = DRM(_vma_info)(oidp, arg1, arg2, req);
+ DRM_OS_UNLOCK;
+ return ret;
+}
+#endif
+
+
+#if DRM_DMA_HISTOGRAM
+static int DRM(_histo_info)DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ drm_device_dma_t *dma = dev->dma;
+ int i;
+ unsigned long slot_value = DRM_DMA_HISTOGRAM_INITIAL;
+ unsigned long prev_value = 0;
+ drm_buf_t *buffer;
+ char buf[128];
+ int error;
+
+ DRM_SYSCTL_PRINT("general statistics:\n");
+ DRM_SYSCTL_PRINT("total %10u\n", atomic_read(&dev->histo.total));
+ DRM_SYSCTL_PRINT("open %10u\n", atomic_read(&dev->total_open));
+ DRM_SYSCTL_PRINT("close %10u\n", atomic_read(&dev->total_close));
+ DRM_SYSCTL_PRINT("ioctl %10u\n", atomic_read(&dev->total_ioctl));
+ DRM_SYSCTL_PRINT("irq %10u\n", atomic_read(&dev->total_irq));
+ DRM_SYSCTL_PRINT("ctx %10u\n", atomic_read(&dev->total_ctx));
+
+ DRM_SYSCTL_PRINT("\nlock statistics:\n");
+ DRM_SYSCTL_PRINT("locks %10u\n", atomic_read(&dev->total_locks));
+ DRM_SYSCTL_PRINT("unlocks %10u\n", atomic_read(&dev->total_unlocks));
+ DRM_SYSCTL_PRINT("contends %10u\n", atomic_read(&dev->total_contends));
+ DRM_SYSCTL_PRINT("sleeps %10u\n", atomic_read(&dev->total_sleeps));
+
+
+ if (dma) {
+ DRM_SYSCTL_PRINT("\ndma statistics:\n");
+ DRM_SYSCTL_PRINT("prio %10u\n",
+ atomic_read(&dma->total_prio));
+ DRM_SYSCTL_PRINT("bytes %10u\n",
+ atomic_read(&dma->total_bytes));
+ DRM_SYSCTL_PRINT("dmas %10u\n",
+ atomic_read(&dma->total_dmas));
+ DRM_SYSCTL_PRINT("missed:\n");
+ DRM_SYSCTL_PRINT(" dma %10u\n",
+ atomic_read(&dma->total_missed_dma));
+ DRM_SYSCTL_PRINT(" lock %10u\n",
+ atomic_read(&dma->total_missed_lock));
+ DRM_SYSCTL_PRINT(" free %10u\n",
+ atomic_read(&dma->total_missed_free));
+ DRM_SYSCTL_PRINT(" sched %10u\n",
+ atomic_read(&dma->total_missed_sched));
+ DRM_SYSCTL_PRINT("tried %10u\n",
+ atomic_read(&dma->total_tried));
+ DRM_SYSCTL_PRINT("hit %10u\n",
+ atomic_read(&dma->total_hit));
+ DRM_SYSCTL_PRINT("lost %10u\n",
+ atomic_read(&dma->total_lost));
+
+ buffer = dma->next_buffer;
+ if (buffer) {
+ DRM_SYSCTL_PRINT("next_buffer %7d\n", buffer->idx);
+ } else {
+ DRM_SYSCTL_PRINT("next_buffer none\n");
+ }
+ buffer = dma->this_buffer;
+ if (buffer) {
+ DRM_SYSCTL_PRINT("this_buffer %7d\n", buffer->idx);
+ } else {
+ DRM_SYSCTL_PRINT("this_buffer none\n");
+ }
+ }
+
+
+ DRM_SYSCTL_PRINT("\nvalues:\n");
+ if (dev->lock.hw_lock) {
+ DRM_SYSCTL_PRINT("lock 0x%08x\n",
+ dev->lock.hw_lock->lock);
+ } else {
+ DRM_SYSCTL_PRINT("lock none\n");
+ }
+ DRM_SYSCTL_PRINT("context_flag 0x%08x\n", dev->context_flag);
+ DRM_SYSCTL_PRINT("interrupt_flag 0x%08x\n", dev->interrupt_flag);
+ DRM_SYSCTL_PRINT("dma_flag 0x%08x\n", dev->dma_flag);
+
+ DRM_SYSCTL_PRINT("queue_count %10d\n", dev->queue_count);
+ DRM_SYSCTL_PRINT("last_context %10d\n", dev->last_context);
+ DRM_SYSCTL_PRINT("last_switch %10u\n", dev->last_switch);
+ DRM_SYSCTL_PRINT("last_checked %10d\n", dev->last_checked);
+
+
+ DRM_SYSCTL_PRINT("\n q2d d2c c2f"
+ " q2c q2f dma sch"
+ " ctx lacq lhld\n\n");
+ for (i = 0; i < DRM_DMA_HISTOGRAM_SLOTS; i++) {
+ DRM_SYSCTL_PRINT("%s %10lu %10u %10u %10u %10u %10u"
+ " %10u %10u %10u %10u %10u\n",
+ i == DRM_DMA_HISTOGRAM_SLOTS - 1 ? ">=" : "< ",
+ i == DRM_DMA_HISTOGRAM_SLOTS - 1
+ ? prev_value : slot_value ,
+
+ atomic_read(&dev->histo
+ .queued_to_dispatched[i]),
+ atomic_read(&dev->histo
+ .dispatched_to_completed[i]),
+ atomic_read(&dev->histo
+ .completed_to_freed[i]),
+
+ atomic_read(&dev->histo
+ .queued_to_completed[i]),
+ atomic_read(&dev->histo
+ .queued_to_freed[i]),
+ atomic_read(&dev->histo.dma[i]),
+ atomic_read(&dev->histo.schedule[i]),
+ atomic_read(&dev->histo.ctx[i]),
+ atomic_read(&dev->histo.lacq[i]),
+ atomic_read(&dev->histo.lhld[i]));
+ prev_value = slot_value;
+ slot_value = DRM_DMA_HISTOGRAM_NEXT(slot_value);
+ }
+ SYSCTL_OUT(req, "", 1);
+ return 0;
+}
+
+static int DRM(histo_info)DRM_SYSCTL_HANDLER_ARGS
+{
+ drm_device_t *dev = arg1;
+ int ret;
+
+ DRM_OS_LOCK;
+	ret = DRM(_histo_info)(oidp, arg1, arg2, req);
+ DRM_OS_UNLOCK;
+ return ret;
+}
+#endif
diff --git a/bsd/drm_vm.h b/bsd/drm_vm.h
new file mode 100644
index 00000000..a06fb448
--- /dev/null
+++ b/bsd/drm_vm.h
@@ -0,0 +1,81 @@
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+static int DRM(dma_mmap)(dev_t kdev, vm_offset_t offset, int prot)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_device_dma_t *dma = dev->dma;
+ unsigned long physical;
+ unsigned long page;
+
+ if (!dma) return -1; /* Error */
+ if (!dma->pagelist) return -1; /* Nothing allocated */
+
+ page = offset >> PAGE_SHIFT;
+ physical = dma->pagelist[page];
+
+ DRM_DEBUG("0x%08x (page %lu) => 0x%08lx\n", offset, page, physical);
+ return atop(physical);
+}
+
+int DRM(mmap)(dev_t kdev, vm_offset_t offset, int prot)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_map_t *map = NULL;
+ drm_map_list_entry_t *listentry=NULL;
+ /*drm_file_t *priv;*/
+
+/* DRM_DEBUG("offset = 0x%x\n", offset);*/
+
+ /*XXX Fixme */
+ /*priv = DRM(find_file_by_proc)(dev, p);
+ if (!priv) {
+ DRM_DEBUG("can't find authenticator\n");
+ return EINVAL;
+ }
+
+ if (!priv->authenticated) DRM_OS_RETURN(EACCES);*/
+
+ if (dev->dma
+ && offset >= 0
+ && offset < ptoa(dev->dma->page_count))
+ return DRM(dma_mmap)(kdev, offset, prot);
+
+ /* A sequential search of a linked list is
+ fine here because: 1) there will only be
+ about 5-10 entries in the list and, 2) a
+ DRI client only has to do this mapping
+ once, so it doesn't have to be optimized
+ for performance, even if the list was a
+ bit longer. */
+ TAILQ_FOREACH(listentry, dev->maplist, link) {
+ map = listentry->map;
+/* DRM_DEBUG("considering 0x%x..0x%x\n", map->offset, map->offset + map->size - 1);*/
+ if (offset >= map->offset
+ && offset < map->offset + map->size) break;
+ }
+
+ if (!listentry) {
+ DRM_DEBUG("can't find map\n");
+ return -1;
+ }
+ if (((map->flags&_DRM_RESTRICTED) && suser(curproc))) {
+ DRM_DEBUG("restricted map\n");
+ return -1;
+ }
+
+ switch (map->type) {
+ case _DRM_FRAME_BUFFER:
+ case _DRM_REGISTERS:
+ case _DRM_AGP:
+ return atop(offset);
+ case _DRM_SHM:
+ return atop(vtophys(offset));
+ default:
+ return -1; /* This should never happen. */
+ }
+ DRM_DEBUG("bailing out\n");
+
+ return -1;
+}
+
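DRM(dma_mmap) above turns the faulting offset into an index into the DMA page list and hands the pager a page frame number via atop(), not a physical address. A small sketch of that arithmetic with an invented page list:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define ATOP(x)		((x) >> PAGE_SHIFT)	/* like atop() in the kernel headers */

int main(void)
{
	unsigned long pagelist[] = { 0x1000000UL, 0x1234000UL, 0x2000000UL };
	unsigned long offset = 2 * PAGE_SIZE + 0x80;	/* fault offset into the map */
	unsigned long page = offset >> PAGE_SHIFT;
	unsigned long physical = pagelist[page];

	printf("offset 0x%lx -> page %lu -> phys 0x%lx -> frame %lu\n",
	       offset, page, physical, ATOP(physical));
	return 0;
}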
diff --git a/bsd/gamma/Makefile b/bsd/gamma/Makefile
index 37f3f5e7..97bb1b69 100644
--- a/bsd/gamma/Makefile
+++ b/bsd/gamma/Makefile
@@ -1,10 +1,10 @@
# $FreeBSD$
KMOD = gamma
+NOMAN= YES
SRCS = gamma_drv.c gamma_dma.c
-SRCS += device_if.h bus_if.h pci_if.h
+SRCS += device_if.h bus_if.h pci_if.h opt_drm_linux.h
CFLAGS += ${DEBUG_FLAGS} -I. -I..
-KMODDEPS = drm
@:
ln -sf /sys @
@@ -12,4 +12,14 @@ KMODDEPS = drm
machine:
ln -sf /sys/i386/include machine
+.if ${MACHINE_ARCH} == "i386"
+# The following line enables Linux ioctl handling.
+# Uncomment it if you want that support.
+#TDFX_OPTS= "\#define DRM_LINUX" 1
+.endif
+
+opt_drm_linux.h:
+ touch opt_drm_linux.h
+ echo $(TDFX_OPTS) >> opt_drm_linux.h
+
.include <bsd.kmod.mk>
diff --git a/bsd/i810_drm.h b/bsd/i810_drm.h
index 4c8e09f6..f2114dd1 100644
--- a/bsd/i810_drm.h
+++ b/bsd/i810_drm.h
@@ -98,8 +98,13 @@ typedef struct _drm_i810_init {
I810_INIT_DMA = 0x01,
I810_CLEANUP_DMA = 0x02
} func;
+#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
int ring_map_idx;
int buffer_map_idx;
+#else
+ unsigned int mmio_offset;
+ unsigned int buffers_offset;
+#endif
int sarea_priv_offset;
unsigned int ring_start;
unsigned int ring_end;
@@ -107,6 +112,8 @@ typedef struct _drm_i810_init {
unsigned int front_offset;
unsigned int back_offset;
unsigned int depth_offset;
+ unsigned int overlay_offset;
+ unsigned int overlay_physical;
unsigned int w;
unsigned int h;
unsigned int pitch;
@@ -178,6 +185,12 @@ typedef struct _drm_i810_vertex {
int discard; /* client is finished with the buffer? */
} drm_i810_vertex_t;
+typedef struct _drm_i810_copy_t {
+ int idx; /* buffer index */
+ int used; /* nr bytes in use */
+ void *address; /* Address to copy from */
+} drm_i810_copy_t;
+
typedef struct drm_i810_dma {
void *virtual;
int request_idx;
diff --git a/bsd/mga/Makefile b/bsd/mga/Makefile
new file mode 100644
index 00000000..bbaeaa56
--- /dev/null
+++ b/bsd/mga/Makefile
@@ -0,0 +1,25 @@
+# $FreeBSD$
+
+KMOD= mga
+NOMAN= YES
+SRCS= mga_drv.c mga_state.c mga_warp.c mga_dma.c
+SRCS+= device_if.h bus_if.h pci_if.h opt_drm_linux.h
+CFLAGS+= ${DEBUG_FLAGS} -I. -I..
+
+@:
+ ln -sf /sys @
+
+machine:
+ ln -sf /sys/i386/include machine
+
+.if ${MACHINE_ARCH} == "i386"
+# The following line enables Linux ioctl handling.
+# Uncomment it if you want that support.
+#MGA_OPTS= "\#define DRM_LINUX" 1
+.endif
+
+opt_drm_linux.h:
+ touch opt_drm_linux.h
+ echo $(MGA_OPTS) >> opt_drm_linux.h
+
+.include <bsd.kmod.mk>
diff --git a/bsd/mga/mga_dma.c b/bsd/mga/mga_dma.c
index 85c29df7..9ed5d095 100644
--- a/bsd/mga/mga_dma.c
+++ b/bsd/mga/mga_dma.c
@@ -1,4 +1,4 @@
-/* mga_dma.c -- DMA support for mga g200/g400
+/* mga_dma.c -- DMA support for mga g200/g400 -*- linux-c -*-
* Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
@@ -11,11 +11,11 @@
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
@@ -24,1054 +24,796 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
- * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
- * Keith Whitwell <keithw@valinux.com>
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Jeff Hartmann <jhartmann@valinux.com>
+ * Keith Whitwell <keithw@valinux.com>
*
+ * Rewritten by:
+ * Gareth Hughes <gareth@valinux.com>
*/
#define __NO_VERSION__
+#include "mga.h"
#include "drmP.h"
#include "mga_drv.h"
-#include <machine/bus.h>
-#include <machine/resource.h>
-#include <sys/rman.h>
-#include <vm/vm.h>
-#include <vm/pmap.h>
-#define MGA_REG(reg) 2
-#define MGA_BASE(reg) ((unsigned long) \
- ((drm_device_t *)dev)->maplist[MGA_REG(reg)]->handle)
-#define MGA_ADDR(reg) (MGA_BASE(reg) + reg)
-#define MGA_DEREF(reg) *(__volatile__ int *)MGA_ADDR(reg)
-#define MGA_READ(reg) MGA_DEREF(reg)
-#define MGA_WRITE(reg,val) do { MGA_DEREF(reg) = val; } while (0)
+#define MGA_DEFAULT_USEC_TIMEOUT 10000
+#define MGA_FREELIST_DEBUG 0
-#define PDEA_pagpxfer_enable 0x2
-static int mga_flush_queue(drm_device_t *dev);
+/* ================================================================
+ * Engine control
+ */
-static unsigned long mga_alloc_page(drm_device_t *dev)
+int mga_do_wait_for_idle( drm_mga_private_t *dev_priv )
{
- unsigned long address;
-
- DRM_DEBUG("%s\n", __FUNCTION__);
+ u32 status = 0;
+ int i;
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
- address = (unsigned long) drm_alloc(PAGE_SIZE, DRM_MEM_DMA);
- if(address == 0UL) {
- return 0;
+ for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
+ status = MGA_READ( MGA_STATUS ) & MGA_ENGINE_IDLE_MASK;
+ if ( status == MGA_ENDPRDMASTS ) {
+ MGA_WRITE8( MGA_CRTC_INDEX, 0 );
+ return 0;
+ }
+ DRM_OS_DELAY( 1 );
}
-
- return address;
+
+#if MGA_DMA_DEBUG
+ DRM_ERROR( "failed!\n" );
+ DRM_INFO( " status=0x%08x\n", status );
+#endif
+ DRM_OS_RETURN(EBUSY);
}
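mga_do_wait_for_idle() above, and the DMA-idle loop that follows, poll a status register one microsecond at a time for up to usec_timeout attempts before reporting EBUSY. A user-space sketch of that bounded-polling idiom, with check_status() as a stand-in for the register read:

#include <stdio.h>
#include <unistd.h>
#include <errno.h>

static unsigned int fake_status;	/* stands in for MGA_READ(MGA_STATUS) */
static unsigned int check_status(void) { return fake_status; }

static int wait_for_idle(unsigned int mask, unsigned int idle, int usec_timeout)
{
	for (int i = 0; i < usec_timeout; i++) {
		if ((check_status() & mask) == idle)
			return 0;
		usleep(1);		/* DRM_OS_DELAY(1) in the driver */
	}
	return EBUSY;
}

int main(void)
{
	fake_status = 0x4;		/* pretend the engine went idle */
	printf("wait_for_idle -> %d\n", wait_for_idle(0x7, 0x4, 10000));
	return 0;
}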
-static void mga_free_page(drm_device_t *dev, unsigned long page)
+int mga_do_dma_idle( drm_mga_private_t *dev_priv )
{
- DRM_DEBUG("%s\n", __FUNCTION__);
+ u32 status = 0;
+ int i;
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
- if(page == 0UL) {
- return;
+ for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
+ status = MGA_READ( MGA_STATUS ) & MGA_DMA_IDLE_MASK;
+ if ( status == MGA_ENDPRDMASTS ) return 0;
+ DRM_OS_DELAY( 1 );
}
- drm_free((void *) page, PAGE_SIZE, DRM_MEM_DMA);
- return;
-}
-static void mga_delay(void)
-{
- return;
+#if MGA_DMA_DEBUG
+ DRM_ERROR( "failed! status=0x%08x\n", status );
+#endif
+ DRM_OS_RETURN(EBUSY);
}
-void mga_flush_write_combine(void)
+int mga_do_dma_reset( drm_mga_private_t *dev_priv )
{
- int xchangeDummy;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_mga_primary_buffer_t *primary = &dev_priv->prim;
- __asm__ volatile(" push %%eax ; xchg %%eax, %0 ; pop %%eax" : : "m" (xchangeDummy));
- __asm__ volatile(" push %%eax ; push %%ebx ; push %%ecx ; push %%edx ;"
- " movl $0,%%eax ; cpuid ; pop %%edx ; pop %%ecx ; pop %%ebx ;"
- " pop %%eax" : /* no outputs */ : /* no inputs */ );
-}
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
-/* These are two age tags that will never be sent to
- * the hardware */
-#define MGA_BUF_USED 0xffffffff
-#define MGA_BUF_FREE 0
+ /* The primary DMA stream should look like new right about now.
+ */
+ primary->tail = 0;
+ primary->space = primary->size;
+ primary->last_flush = 0;
-static int mga_freelist_init(drm_device_t *dev)
-{
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
- drm_mga_buf_priv_t *buf_priv;
- drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
- drm_mga_freelist_t *item;
- int i;
-
- DRM_DEBUG("%s\n", __FUNCTION__);
-
- dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
- if(dev_priv->head == NULL) return ENOMEM;
- memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t));
- dev_priv->head->age = MGA_BUF_USED;
-
- for (i = 0; i < dma->buf_count; i++) {
- buf = dma->buflist[ i ];
- buf_priv = buf->dev_private;
- item = drm_alloc(sizeof(drm_mga_freelist_t),
- DRM_MEM_DRIVER);
- if(item == NULL) return ENOMEM;
- memset(item, 0, sizeof(drm_mga_freelist_t));
- item->age = MGA_BUF_FREE;
- item->prev = dev_priv->head;
- item->next = dev_priv->head->next;
- if(dev_priv->head->next != NULL)
- dev_priv->head->next->prev = item;
- if(item->next == NULL) dev_priv->tail = item;
- item->buf = buf;
- buf_priv->my_freelist = item;
- buf_priv->discard = 0;
- dev_priv->head->next = item;
- }
-
- return 0;
-}
+ sarea_priv->last_wrap = 0;
-static void mga_freelist_cleanup(drm_device_t *dev)
-{
- drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
- drm_mga_freelist_t *item;
- drm_mga_freelist_t *prev;
+ /* FIXME: Reset counters, buffer ages etc...
+ */
- DRM_DEBUG("%s\n", __FUNCTION__);
+ /* FIXME: What else do we need to reinitialize? WARP stuff?
+ */
- item = dev_priv->head;
- while(item) {
- prev = item;
- item = item->next;
- drm_free(prev, sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
- }
-
- dev_priv->head = dev_priv->tail = NULL;
+ return 0;
}
-/* Frees dispatch lock */
-static __inline void mga_dma_quiescent(drm_device_t *dev)
+int mga_do_engine_reset( drm_mga_private_t *dev_priv )
{
- drm_device_dma_t *dma = dev->dma;
- drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned long end;
- int i;
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
- DRM_DEBUG("%s\n", __FUNCTION__);
- end = ticks + (hz*3);
- while(1) {
- if(!test_and_set_bit(MGA_IN_DISPATCH,
- &dev_priv->dispatch_status)) {
- break;
- }
- if((signed)(end - ticks) <= 0) {
- DRM_ERROR("irqs: %d wanted %d\n",
- atomic_read(&dev->total_irq),
- atomic_read(&dma->total_lost));
- DRM_ERROR("lockup\n");
- return;
- }
- for (i = 0 ; i < 2000 ; i++) mga_delay();
- }
- end = ticks + (hz*3);
- DRM_DEBUG("quiescent status : %x\n", MGA_READ(MGAREG_STATUS));
- while((MGA_READ(MGAREG_STATUS) & 0x00030001) != 0x00020000) {
- if((signed)(end - ticks) <= 0) {
- DRM_ERROR("irqs: %d wanted %d\n",
- atomic_read(&dev->total_irq),
- atomic_read(&dma->total_lost));
- DRM_ERROR("lockup\n");
- clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
- return;
- }
- for (i = 0 ; i < 2000 ; i++) mga_delay();
- }
- sarea_priv->dirty |= MGA_DMA_FLUSH;
+ /* Okay, so we've completely screwed up and locked the engine.
+ * How about we clean up after ourselves?
+ */
+ MGA_WRITE( MGA_RST, MGA_SOFTRESET );
+ DRM_OS_DELAY( 15 ); /* Wait at least 10 usecs */
+ MGA_WRITE( MGA_RST, 0 );
+
+ /* Initialize the registers that get clobbered by the soft
+ * reset. Many of the core register values survive a reset,
+ * but the drawing registers are basically all gone.
+ *
+ * 3D clients should probably die after calling this. The X
+ * server should reset the engine state to known values.
+ */
+#if 0
+ MGA_WRITE( MGA_PRIMPTR,
+ virt_to_bus((void *)dev_priv->prim.status_page) |
+ MGA_PRIMPTREN0 |
+ MGA_PRIMPTREN1 );
+#endif
- clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
- DRM_DEBUG("exit, dispatch_status = 0x%02x\n",dev_priv->dispatch_status);
-}
+ MGA_WRITE( MGA_ICLEAR, MGA_SOFTRAPICLR );
+ MGA_WRITE( MGA_IEN, MGA_SOFTRAPIEN );
-static void mga_reset_freelist(drm_device_t *dev)
-{
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
- drm_mga_buf_priv_t *buf_priv;
- int i;
+ /* The primary DMA stream should look like new right about now.
+ */
+ mga_do_dma_reset( dev_priv );
- for (i = 0; i < dma->buf_count; i++) {
- buf = dma->buflist[ i ];
- buf_priv = buf->dev_private;
- buf_priv->my_freelist->age = MGA_BUF_FREE;
- }
+ /* This bad boy will never fail.
+ */
+ return 0;
}
-/* Least recently used :
- * These operations are not atomic b/c they are protected by the
- * hardware lock */
-drm_buf_t *mga_freelist_get(drm_device_t *dev)
+/* ================================================================
+ * Primary DMA stream
+ */
+
+void mga_do_dma_flush( drm_mga_private_t *dev_priv )
{
- drm_mga_private_t *dev_priv =
- (drm_mga_private_t *) dev->dev_private;
- drm_mga_freelist_t *prev;
- drm_mga_freelist_t *next;
- static int failed = 0;
- int ret, s;
-
- DRM_DEBUG("%s : tail->age : %d last_prim_age : %d\n", __FUNCTION__,
- dev_priv->tail->age, dev_priv->last_prim_age);
-
- if(failed >= 1000 && dev_priv->tail->age >= dev_priv->last_prim_age) {
- DRM_DEBUG("I'm waiting on the freelist!!! %d\n",
- dev_priv->last_prim_age);
- s = splsofttq();
- set_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status);
- for (;;) {
- mga_dma_schedule(dev, 0);
-/* if(!test_bit(MGA_IN_GETBUF,
- &dev_priv->dispatch_status)) */
- if(dev_priv->tail->age < dev_priv->last_prim_age)
- break;
- atomic_inc(&dev->total_sleeps);
- ret = tsleep(&dev_priv->buf_queue, PZERO|PCATCH,
- "mgafg", 0);
- if (ret == EINTR) {
- clear_bit(MGA_IN_GETBUF,
- &dev_priv->dispatch_status);
- break;
- }
- }
- splx(s);
- clear_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status);
- if (ret) return NULL;
- }
-
- if(dev_priv->tail->age < dev_priv->last_prim_age) {
- prev = dev_priv->tail->prev;
- next = dev_priv->tail;
- prev->next = NULL;
- next->prev = next->next = NULL;
- dev_priv->tail = prev;
- next->age = MGA_BUF_USED;
- failed = 0;
- return next->buf;
+ drm_mga_primary_buffer_t *primary = &dev_priv->prim;
+ u32 head, tail;
+ DMA_LOCALS;
+ DRM_DEBUG( "%s:\n", __FUNCTION__ );
+
+ if ( primary->tail == primary->last_flush ) {
+ DRM_DEBUG( " bailing out...\n" );
+ return;
}
- failed++;
- return NULL;
-}
+ tail = primary->tail + dev_priv->primary->offset;
-int mga_freelist_put(drm_device_t *dev, drm_buf_t *buf)
-{
- drm_mga_private_t *dev_priv =
- (drm_mga_private_t *) dev->dev_private;
- drm_mga_buf_priv_t *buf_priv = buf->dev_private;
- drm_mga_freelist_t *prev;
- drm_mga_freelist_t *head;
- drm_mga_freelist_t *next;
+ /* We need to pad the stream between flushes, as the card
+ * actually (partially?) reads the first of these commands.
+ * See page 4-16 in the G400 manual, middle of the page or so.
+ */
+ BEGIN_DMA( 1 );
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DMA_BLOCK( MGA_DMAPAD, 0x00000000,
+ MGA_DMAPAD, 0x00000000,
+ MGA_DMAPAD, 0x00000000,
+ MGA_DMAPAD, 0x00000000 );
- if(buf_priv->my_freelist->age == MGA_BUF_USED) {
- /* Discarded buffer, put it on the tail */
- next = buf_priv->my_freelist;
- next->age = MGA_BUF_FREE;
- prev = dev_priv->tail;
- prev->next = next;
- next->prev = prev;
- next->next = NULL;
- dev_priv->tail = next;
- DRM_DEBUG("Discarded\n");
+ ADVANCE_DMA();
+
+ primary->last_flush = primary->tail;
+
+ head = MGA_READ( MGA_PRIMADDRESS );
+
+ if ( head <= tail ) {
+ primary->space = primary->size - primary->tail;
} else {
- /* Normally aged buffer, put it on the head + 1,
- * as the real head is a sentinal element
- */
- next = buf_priv->my_freelist;
- head = dev_priv->head;
- prev = head->next;
- head->next = next;
- prev->prev = next;
- next->prev = head;
- next->next = prev;
+ primary->space = head - tail;
}
-
- return 0;
-}
-static int mga_init_primary_bufs(drm_device_t *dev, drm_mga_init_t *init)
-{
- drm_mga_private_t *dev_priv = dev->dev_private;
- drm_mga_prim_buf_t *prim_buffer;
- int i, temp, size_of_buf;
- int offset = init->reserved_map_agpstart;
-
- DRM_DEBUG("%s\n", __FUNCTION__);
- dev_priv->primary_size = ((init->primary_size + PAGE_SIZE - 1) /
- PAGE_SIZE) * PAGE_SIZE;
- size_of_buf = dev_priv->primary_size / MGA_NUM_PRIM_BUFS;
- dev_priv->warp_ucode_size = init->warp_ucode_size;
- dev_priv->prim_bufs = drm_alloc(sizeof(drm_mga_prim_buf_t *) *
- (MGA_NUM_PRIM_BUFS + 1),
- DRM_MEM_DRIVER);
- if(dev_priv->prim_bufs == NULL) {
- DRM_ERROR("Unable to allocate memory for prim_buf\n");
- return ENOMEM;
- }
- memset(dev_priv->prim_bufs,
- 0, sizeof(drm_mga_prim_buf_t *) * (MGA_NUM_PRIM_BUFS + 1));
-
- temp = init->warp_ucode_size + dev_priv->primary_size;
- temp = ((temp + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
-
- dev_priv->ioremap = drm_ioremap(dev->agp->base + offset,
- temp);
- if(dev_priv->ioremap == NULL) {
- DRM_DEBUG("Ioremap failed\n");
- return ENOMEM;
- }
- dev_priv->wait_queue = 0;
-
- for(i = 0; i < MGA_NUM_PRIM_BUFS; i++) {
- prim_buffer = drm_alloc(sizeof(drm_mga_prim_buf_t),
- DRM_MEM_DRIVER);
- if(prim_buffer == NULL) return ENOMEM;
- memset(prim_buffer, 0, sizeof(drm_mga_prim_buf_t));
- prim_buffer->phys_head = offset + dev->agp->base;
- prim_buffer->current_dma_ptr =
- prim_buffer->head =
- (u_int32_t *) (dev_priv->ioremap +
- offset -
- init->reserved_map_agpstart);
- prim_buffer->num_dwords = 0;
- prim_buffer->max_dwords = size_of_buf / sizeof(u_int32_t);
- prim_buffer->max_dwords -= 5; /* Leave room for the softrap */
- prim_buffer->sec_used = 0;
- prim_buffer->idx = i;
- prim_buffer->prim_age = i + 1;
- offset = offset + size_of_buf;
- dev_priv->prim_bufs[i] = prim_buffer;
- }
- dev_priv->current_prim_idx = 0;
- dev_priv->next_prim =
- dev_priv->last_prim =
- dev_priv->current_prim =
- dev_priv->prim_bufs[0];
- dev_priv->next_prim_age = 2;
- dev_priv->last_prim_age = 1;
- set_bit(MGA_BUF_IN_USE, &dev_priv->current_prim->buffer_status);
- return 0;
+ DRM_DEBUG( " head = 0x%06lx\n", head - dev_priv->primary->offset );
+ DRM_DEBUG( " tail = 0x%06lx\n", tail - dev_priv->primary->offset );
+ DRM_DEBUG( " space = 0x%06x\n", primary->space );
+
+ mga_flush_write_combine();
+ MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER );
+
+ DRM_DEBUG( "%s: done.\n", __FUNCTION__ );
}
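
mga_do_dma_flush() above treats the primary buffer as a ring: the hardware read pointer comes from MGA_PRIMADDRESS and the software write pointer is primary->tail, both expressed relative to the primary map's offset. The free-space arithmetic is easy to get backwards, so here is a minimal, standalone sketch of the same calculation; ring_space() and its parameters are hypothetical names used only for illustration, not part of the driver.

/* Minimal model of the free-space calculation in mga_do_dma_flush().
 * ring_space() is a hypothetical helper; in the driver, `head` is read
 * from MGA_PRIMADDRESS and `tail` is primary->tail + primary->offset.
 */
#include <stdio.h>

typedef unsigned int u32;

static u32 ring_space(u32 head, u32 tail, u32 offset, u32 size)
{
	if (head <= tail) {
		/* The read pointer is at or behind the write pointer, so
		 * the free region runs from the tail to the end of the
		 * ring; the wrap handlers reclaim the low part later.
		 */
		return size - (tail - offset);
	} else {
		/* The read pointer is ahead of the write pointer (the
		 * ring has wrapped), so only the gap between them is
		 * free.
		 */
		return head - tail;
	}
}

int main(void)
{
	u32 offset = 0x00100000, size = 0x00010000;

	/* Hardware has consumed up to 0x0100; the writer is at 0x4000. */
	printf("space = 0x%05x\n", ring_space(offset + 0x0100, offset + 0x4000,
					      offset, size));
	/* Hardware is ahead at 0x8000 after a wrap; the writer is back at 0x4000. */
	printf("space = 0x%05x\n", ring_space(offset + 0x8000, offset + 0x4000,
					      offset, size));
	return 0;
}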
-static void mga_fire_primary(drm_device_t *dev, drm_mga_prim_buf_t *prim)
+void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv )
{
- drm_mga_private_t *dev_priv = dev->dev_private;
- drm_device_dma_t *dma = dev->dma;
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- int use_agp = PDEA_pagpxfer_enable;
- unsigned long end;
- int i;
- int next_idx;
- PRIMLOCALS;
-
- DRM_DEBUG("%s\n", __FUNCTION__);
- dev_priv->last_prim = prim;
-
- /* We never check for overflow, b/c there is always room */
- PRIMPTR(prim);
- if(num_dwords <= 0) {
- DRM_DEBUG("num_dwords == 0 when dispatched\n");
- goto out_prim_wait;
- }
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG( MGAREG_DMAPAD, 0);
- PRIMOUTREG( MGAREG_SOFTRAP, 0);
- PRIMFINISH(prim);
-
- end = ticks + (hz*3);
- if(sarea_priv->dirty & MGA_DMA_FLUSH) {
- DRM_DEBUG("Dma top flush\n");
- while((MGA_READ(MGAREG_STATUS) & 0x00030001) != 0x00020000) {
- if((signed)(end - ticks) <= 0) {
- DRM_ERROR("irqs: %d wanted %d\n",
- atomic_read(&dev->total_irq),
- atomic_read(&dma->total_lost));
- DRM_ERROR("lockup in fire primary "
- "(Dma Top Flush)\n");
- goto out_prim_wait;
- }
-
- for (i = 0 ; i < 4096 ; i++) mga_delay();
- }
- sarea_priv->dirty &= ~(MGA_DMA_FLUSH);
+ drm_mga_primary_buffer_t *primary = &dev_priv->prim;
+ u32 head, tail;
+ DMA_LOCALS;
+ DRM_DEBUG( "%s:\n", __FUNCTION__ );
+
+ BEGIN_DMA_WRAP();
+
+ DMA_BLOCK( MGA_DMAPAD, 0x00000000,
+ MGA_DMAPAD, 0x00000000,
+ MGA_DMAPAD, 0x00000000,
+ MGA_DMAPAD, 0x00000000 );
+
+ ADVANCE_DMA();
+
+ tail = primary->tail + dev_priv->primary->offset;
+
+ primary->tail = 0;
+ primary->last_flush = 0;
+ primary->last_wrap++;
+
+ head = MGA_READ( MGA_PRIMADDRESS );
+
+ if ( head == dev_priv->primary->offset ) {
+ primary->space = primary->size;
} else {
- DRM_DEBUG("Status wait\n");
- while((MGA_READ(MGAREG_STATUS) & 0x00020001) != 0x00020000) {
- if((signed)(end - ticks) <= 0) {
- DRM_ERROR("irqs: %d wanted %d\n",
- atomic_read(&dev->total_irq),
- atomic_read(&dma->total_lost));
- DRM_ERROR("lockup in fire primary "
- "(Status Wait)\n");
- goto out_prim_wait;
- }
-
- for (i = 0 ; i < 4096 ; i++) mga_delay();
- }
+ primary->space = head - dev_priv->primary->offset;
}
- mga_flush_write_combine();
- atomic_inc(&dev_priv->pending_bufs);
- MGA_WRITE(MGAREG_PRIMADDRESS, phys_head | TT_GENERAL);
- MGA_WRITE(MGAREG_PRIMEND, (phys_head + num_dwords * 4) | use_agp);
- prim->num_dwords = 0;
- sarea_priv->last_enqueue = prim->prim_age;
-
- next_idx = prim->idx + 1;
- if(next_idx >= MGA_NUM_PRIM_BUFS)
- next_idx = 0;
-
- dev_priv->next_prim = dev_priv->prim_bufs[next_idx];
- return;
-
- out_prim_wait:
- prim->num_dwords = 0;
- prim->sec_used = 0;
- clear_bit(MGA_BUF_IN_USE, &prim->buffer_status);
- wakeup(&dev_priv->wait_queue);
- clear_bit(MGA_BUF_SWAP_PENDING, &prim->buffer_status);
- clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
+ DRM_DEBUG( " head = 0x%06lx\n",
+ head - dev_priv->primary->offset );
+ DRM_DEBUG( " tail = 0x%06x\n", primary->tail );
+ DRM_DEBUG( " wrap = %d\n", primary->last_wrap );
+ DRM_DEBUG( " space = 0x%06x\n", primary->space );
+
+ mga_flush_write_combine();
+ MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER );
+
+ set_bit( 0, &primary->wrapped );
+ DRM_DEBUG( "%s: done.\n", __FUNCTION__ );
}
-int mga_advance_primary(drm_device_t *dev)
+void mga_do_dma_wrap_end( drm_mga_private_t *dev_priv )
{
- drm_mga_private_t *dev_priv = dev->dev_private;
- drm_mga_prim_buf_t *prim_buffer;
- drm_device_dma_t *dma = dev->dma;
- int next_prim_idx;
- int ret = 0;
- int s;
-
- /* This needs to reset the primary buffer if available,
- * we should collect stats on how many times it bites
- * it's tail */
- DRM_DEBUG("%s\n", __FUNCTION__);
-
- next_prim_idx = dev_priv->current_prim_idx + 1;
- if(next_prim_idx >= MGA_NUM_PRIM_BUFS)
- next_prim_idx = 0;
- prim_buffer = dev_priv->prim_bufs[next_prim_idx];
- set_bit(MGA_IN_WAIT, &dev_priv->dispatch_status);
-
- /* In use is cleared in interrupt handler */
-
- s = splsofttq();
- if(test_and_set_bit(MGA_BUF_IN_USE, &prim_buffer->buffer_status)) {
- for (;;) {
- mga_dma_schedule(dev, 0);
- if(!test_and_set_bit(MGA_BUF_IN_USE,
- &prim_buffer->buffer_status))
- break;
- atomic_inc(&dev->total_sleeps);
- atomic_inc(&dma->total_missed_sched);
- ret = tsleep(&dev_priv->wait_queue, PZERO|PCATCH,
- "mgaap", 0);
- if (ret)
- break;
- }
- if(ret) {
- splx(s);
- return ret;
- }
- }
- clear_bit(MGA_IN_WAIT, &dev_priv->dispatch_status);
- splx(s);
-
- /* This primary buffer is now free to use */
- prim_buffer->current_dma_ptr = prim_buffer->head;
- prim_buffer->num_dwords = 0;
- prim_buffer->sec_used = 0;
- prim_buffer->prim_age = dev_priv->next_prim_age++;
- if(prim_buffer->prim_age == 0 || prim_buffer->prim_age == 0xffffffff) {
- mga_flush_queue(dev);
- mga_dma_quiescent(dev);
- mga_reset_freelist(dev);
- prim_buffer->prim_age = (dev_priv->next_prim_age += 2);
- }
+ drm_mga_primary_buffer_t *primary = &dev_priv->prim;
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ u32 head = dev_priv->primary->offset;
+ DRM_DEBUG( "%s:\n", __FUNCTION__ );
+
+ sarea_priv->last_wrap++;
+ DRM_DEBUG( " wrap = %d\n", sarea_priv->last_wrap );
- /* Reset all buffer status stuff */
- clear_bit(MGA_BUF_NEEDS_OVERFLOW, &prim_buffer->buffer_status);
- clear_bit(MGA_BUF_FORCE_FIRE, &prim_buffer->buffer_status);
- clear_bit(MGA_BUF_SWAP_PENDING, &prim_buffer->buffer_status);
+ mga_flush_write_combine();
+ MGA_WRITE( MGA_PRIMADDRESS, head | MGA_DMA_GENERAL );
- dev_priv->current_prim = prim_buffer;
- dev_priv->current_prim_idx = next_prim_idx;
- return 0;
+ clear_bit( 0, &primary->wrapped );
+ DRM_DEBUG( "%s: done.\n", __FUNCTION__ );
}
-/* More dynamic performance decisions */
-static __inline int mga_decide_to_fire(drm_device_t *dev)
+
+/* ================================================================
+ * Freelist management
+ */
+
+#define MGA_BUFFER_USED ~0
+#define MGA_BUFFER_FREE 0
+
+#if MGA_FREELIST_DEBUG
+static void mga_freelist_print( drm_device_t *dev )
{
- drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
+ drm_mga_private_t *dev_priv = dev->dev_private;
+ drm_mga_freelist_t *entry;
+
+ DRM_INFO( "\n" );
+ DRM_INFO( "current dispatch: last=0x%x done=0x%x\n",
+ dev_priv->sarea_priv->last_dispatch,
+ (unsigned int)(MGA_READ( MGA_PRIMADDRESS ) -
+ dev_priv->primary->offset) );
+ DRM_INFO( "current freelist:\n" );
+
+ for ( entry = dev_priv->head->next ; entry ; entry = entry->next ) {
+ DRM_INFO( " %p idx=%2d age=0x%x 0x%06lx\n",
+ entry, entry->buf->idx, entry->age.head,
+ entry->age.head - dev_priv->primary->offset );
+ }
+ DRM_INFO( "\n" );
+}
+#endif
- DRM_DEBUG("%s\n", __FUNCTION__);
+static int mga_freelist_init( drm_device_t *dev, drm_mga_private_t *dev_priv )
+{
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_t *buf;
+ drm_mga_buf_priv_t *buf_priv;
+ drm_mga_freelist_t *entry;
+ int i;
+ DRM_DEBUG( "%s: count=%d\n",
+ __FUNCTION__, dma->buf_count );
- if(test_bit(MGA_BUF_FORCE_FIRE, &dev_priv->next_prim->buffer_status)) {
- return 1;
- }
+ dev_priv->head = DRM(alloc)( sizeof(drm_mga_freelist_t),
+ DRM_MEM_DRIVER );
+ if ( dev_priv->head == NULL )
+ DRM_OS_RETURN(ENOMEM);
- if (test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status) &&
- dev_priv->next_prim->num_dwords) {
- return 1;
- }
+ memset( dev_priv->head, 0, sizeof(drm_mga_freelist_t) );
+ SET_AGE( &dev_priv->head->age, MGA_BUFFER_USED, 0 );
- if (test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status) &&
- dev_priv->next_prim->num_dwords) {
- return 1;
- }
-
- if(atomic_read(&dev_priv->pending_bufs) <= MGA_NUM_PRIM_BUFS - 1) {
- if(test_bit(MGA_BUF_SWAP_PENDING,
- &dev_priv->next_prim->buffer_status)) {
- return 1;
- }
- }
+ for ( i = 0 ; i < dma->buf_count ; i++ ) {
+ buf = dma->buflist[i];
+ buf_priv = buf->dev_private;
- if(atomic_read(&dev_priv->pending_bufs) <= MGA_NUM_PRIM_BUFS / 2) {
- if(dev_priv->next_prim->sec_used >= MGA_DMA_BUF_NR / 8) {
- return 1;
- }
- }
+ entry = DRM(alloc)( sizeof(drm_mga_freelist_t),
+ DRM_MEM_DRIVER );
+ if ( entry == NULL )
+ DRM_OS_RETURN(ENOMEM);
- if(atomic_read(&dev_priv->pending_bufs) >= MGA_NUM_PRIM_BUFS / 2) {
- if(dev_priv->next_prim->sec_used >= MGA_DMA_BUF_NR / 4) {
- return 1;
- }
- }
+ memset( entry, 0, sizeof(drm_mga_freelist_t) );
- return 0;
-}
+ entry->next = dev_priv->head->next;
+ entry->prev = dev_priv->head;
+ SET_AGE( &entry->age, MGA_BUFFER_FREE, 0 );
+ entry->buf = buf;
-int mga_dma_schedule(drm_device_t *dev, int locked)
-{
- drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
- int retval =0 ;
+ if ( dev_priv->head->next != NULL )
+ dev_priv->head->next->prev = entry;
+ if ( entry->next == NULL )
+ dev_priv->tail = entry;
- if (!dev_priv) return EBUSY;
+ buf_priv->list_entry = entry;
+ buf_priv->discard = 0;
+ buf_priv->dispatched = 0;
- if (test_and_set_bit(0, &dev->dma_flag)) {
- retval = EBUSY;
- goto sch_out_wakeup;
- }
-
- DRM_DEBUG("%s\n", __FUNCTION__);
- if (!dev_priv) {
- DRM_DEBUG("dev_priv is not set\n");
- return (0);
+ dev_priv->head->next = entry;
}
- if(test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status) ||
- test_bit(MGA_IN_WAIT, &dev_priv->dispatch_status) ||
- test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status)) {
- locked = 1;
- }
-
- if (!locked &&
- !drm_lock_take(&dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT)) {
- clear_bit(0, &dev->dma_flag);
- DRM_DEBUG("Not locked\n");
- retval = EBUSY;
- goto sch_out_wakeup;
- }
+ return 0;
+}
- if(!test_and_set_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status)) {
- /* Fire dma buffer */
- if(mga_decide_to_fire(dev)) {
- clear_bit(MGA_BUF_FORCE_FIRE,
- &dev_priv->next_prim->buffer_status);
- if(dev_priv->current_prim == dev_priv->next_prim) {
- /* Schedule overflow for a later time */
- set_bit(MGA_BUF_NEEDS_OVERFLOW,
- &dev_priv->next_prim->buffer_status);
- }
- mga_fire_primary(dev, dev_priv->next_prim);
- } else {
- clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
- }
- }
-
- if (!locked) {
- if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
- DRM_KERNEL_CONTEXT)) {
- DRM_ERROR("\n");
- }
- }
- clear_bit(0, &dev->dma_flag);
-sch_out_wakeup:
- if(test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status) &&
- atomic_read(&dev_priv->pending_bufs) == 0) {
- /* Everything has been processed by the hardware */
- clear_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status);
- wakeup(&dev_priv->flush_queue);
- }
+static void mga_freelist_cleanup( drm_device_t *dev )
+{
+ drm_mga_private_t *dev_priv = dev->dev_private;
+ drm_mga_freelist_t *entry;
+ drm_mga_freelist_t *next;
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
- if(test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status) &&
- dev_priv->tail->age < dev_priv->last_prim_age)
- wakeup(&dev_priv->buf_queue);
+ entry = dev_priv->head;
+ while ( entry ) {
+ next = entry->next;
+ DRM(free)( entry, sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER );
+ entry = next;
+ }
- return retval;
+ dev_priv->head = dev_priv->tail = NULL;
}
-static void mga_dma_service(void *arg)
+#if 0
+/* FIXME: Still needed?
+ */
+static void mga_freelist_reset( drm_device_t *dev )
{
- drm_device_t *dev = (drm_device_t *)arg;
- drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
- drm_mga_prim_buf_t *last_prim_buffer;
-
- atomic_inc(&dev->total_irq);
- if((MGA_READ(MGAREG_STATUS) & 0x00000001) != 0x00000001) return;
- MGA_WRITE(MGAREG_ICLEAR, 0x00000001);
- last_prim_buffer = dev_priv->last_prim;
- last_prim_buffer->num_dwords = 0;
- last_prim_buffer->sec_used = 0;
- dev_priv->sarea_priv->last_dispatch =
- dev_priv->last_prim_age = last_prim_buffer->prim_age;
- clear_bit(MGA_BUF_IN_USE, &last_prim_buffer->buffer_status);
- clear_bit(MGA_BUF_SWAP_PENDING, &last_prim_buffer->buffer_status);
- clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
- atomic_dec(&dev_priv->pending_bufs);
- taskqueue_enqueue(taskqueue_swi, &dev->task);
- wakeup(&dev_priv->wait_queue);
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_t *buf;
+ drm_mga_buf_priv_t *buf_priv;
+ int i;
+
+ for ( i = 0 ; i < dma->buf_count ; i++ ) {
+ buf = dma->buflist[i];
+ buf_priv = buf->dev_private;
+ SET_AGE( &buf_priv->list_entry->age,
+ MGA_BUFFER_FREE, 0 );
+ }
}
+#endif
-static void mga_dma_task_queue(void *device, int pending)
+static drm_buf_t *mga_freelist_get( drm_device_t *dev )
{
- DRM_DEBUG("%s\n", __FUNCTION__);
- mga_dma_schedule((drm_device_t *)device, 0);
+ drm_mga_private_t *dev_priv = dev->dev_private;
+ drm_mga_freelist_t *next;
+ drm_mga_freelist_t *prev;
+ drm_mga_freelist_t *tail = dev_priv->tail;
+ u32 head, wrap;
+ DRM_DEBUG( "%s:\n", __FUNCTION__ );
+
+ head = MGA_READ( MGA_PRIMADDRESS );
+ wrap = dev_priv->sarea_priv->last_wrap;
+
+ DRM_DEBUG( " tail=0x%06lx %d\n",
+ tail->age.head ?
+ tail->age.head - dev_priv->primary->offset : 0,
+ tail->age.wrap );
+ DRM_DEBUG( " head=0x%06lx %d\n",
+ head - dev_priv->primary->offset, wrap );
+
+ if ( TEST_AGE( &tail->age, head, wrap ) ) {
+ prev = dev_priv->tail->prev;
+ next = dev_priv->tail;
+ prev->next = NULL;
+ next->prev = next->next = NULL;
+ dev_priv->tail = prev;
+ SET_AGE( &next->age, MGA_BUFFER_USED, 0 );
+ return next->buf;
+ }
+
+ DRM_DEBUG( "returning NULL!\n" );
+ return NULL;
}
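
mga_freelist_get() only recycles the buffer at the tail of the freelist once TEST_AGE() says the hardware has passed the point at which that buffer was queued, where an age is a (head offset, wrap count) pair stamped by SET_AGE(). Those macros live in mga_drv.h and are not part of this hunk; the sketch below is an assumed model of the comparison, with hypothetical names.

/* Assumed model of the (head, wrap) age test behind TEST_AGE() above.
 * mga_age_t, age_done() and the field names are illustrative only; the
 * real macros are defined in mga_drv.h.
 */
#include <stdio.h>

typedef struct {
	unsigned int head;	/* primary DMA offset recorded when the buffer was queued */
	unsigned int wrap;	/* ring wrap count recorded at the same time */
} mga_age_t;

/* A buffer can be reused once the hardware read pointer has moved past
 * its recorded age: either the ring has wrapped again since the buffer
 * was queued, or we are in the same wrap and the read pointer is beyond
 * the recorded head offset.
 */
static int age_done(const mga_age_t *age, unsigned int hw_head, unsigned int hw_wrap)
{
	return age->wrap < hw_wrap ||
	       (age->wrap == hw_wrap && age->head < hw_head);
}

int main(void)
{
	mga_age_t age = { 0x2000, 3 };

	printf("%d\n", age_done(&age, 0x1000, 3));	/* 0: not reached yet */
	printf("%d\n", age_done(&age, 0x3000, 3));	/* 1: same wrap, head passed */
	printf("%d\n", age_done(&age, 0x0100, 4));	/* 1: ring wrapped again */
	return 0;
}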
-int mga_dma_cleanup(drm_device_t *dev)
+int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf )
{
- DRM_DEBUG("%s\n", __FUNCTION__);
-
- if(dev->dev_private) {
- drm_mga_private_t *dev_priv =
- (drm_mga_private_t *) dev->dev_private;
-
- if (dev->irq) mga_flush_queue(dev);
- mga_dma_quiescent(dev);
- if(dev_priv->ioremap) {
- int temp = (dev_priv->warp_ucode_size +
- dev_priv->primary_size +
- PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE;
-
- drm_ioremapfree((void *) dev_priv->ioremap, temp);
- }
- if(dev_priv->real_status_page != 0UL) {
- mga_free_page(dev, dev_priv->real_status_page);
- }
- if(dev_priv->prim_bufs != NULL) {
- int i;
- for(i = 0; i < MGA_NUM_PRIM_BUFS; i++) {
- if(dev_priv->prim_bufs[i] != NULL) {
- drm_free(dev_priv->prim_bufs[i],
- sizeof(drm_mga_prim_buf_t),
- DRM_MEM_DRIVER);
- }
- }
- drm_free(dev_priv->prim_bufs, sizeof(void *) *
- (MGA_NUM_PRIM_BUFS + 1),
- DRM_MEM_DRIVER);
- }
- if(dev_priv->head != NULL) {
- mga_freelist_cleanup(dev);
- }
+ drm_mga_private_t *dev_priv = dev->dev_private;
+ drm_mga_buf_priv_t *buf_priv = buf->dev_private;
+ drm_mga_freelist_t *head, *entry, *prev;
+ DRM_DEBUG( "%s: age=0x%06lx wrap=%d\n",
+ __FUNCTION__,
+ buf_priv->list_entry->age.head -
+ dev_priv->primary->offset,
+ buf_priv->list_entry->age.wrap );
- drm_free(dev->dev_private, sizeof(drm_mga_private_t),
- DRM_MEM_DRIVER);
- dev->dev_private = NULL;
+ entry = buf_priv->list_entry;
+ head = dev_priv->head;
+
+ if ( buf_priv->list_entry->age.head == MGA_BUFFER_USED ) {
+ SET_AGE( &entry->age, MGA_BUFFER_FREE, 0 );
+ prev = dev_priv->tail;
+ prev->next = entry;
+ entry->prev = prev;
+ entry->next = NULL;
+ } else {
+ prev = head->next;
+ head->next = entry;
+ prev->prev = entry;
+ entry->prev = head;
+ entry->next = prev;
}
return 0;
}
-static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) {
+
+/* ================================================================
+ * DMA initialization, cleanup
+ */
+
+static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
+{
drm_mga_private_t *dev_priv;
- drm_map_t *sarea_map = NULL;
+ drm_map_list_entry_t *listentry;
+ int ret;
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
- dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
- if(dev_priv == NULL) return ENOMEM;
- dev->dev_private = (void *) dev_priv;
+ dev_priv = DRM(alloc)( sizeof(drm_mga_private_t), DRM_MEM_DRIVER );
+ if ( !dev_priv )
+ DRM_OS_RETURN(ENOMEM);
- memset(dev_priv, 0, sizeof(drm_mga_private_t));
+ memset( dev_priv, 0, sizeof(drm_mga_private_t) );
- if((init->reserved_map_idx >= dev->map_count) ||
- (init->buffer_map_idx >= dev->map_count)) {
- mga_dma_cleanup(dev);
- DRM_DEBUG("reserved_map or buffer_map are invalid\n");
- return EINVAL;
- }
-
- dev_priv->reserved_map_idx = init->reserved_map_idx;
- dev_priv->buffer_map_idx = init->buffer_map_idx;
- sarea_map = dev->maplist[0];
- dev_priv->sarea_priv = (drm_mga_sarea_t *)
- ((u_int8_t *)sarea_map->handle +
- init->sarea_priv_offset);
-
- /* Scale primary size to the next page */
dev_priv->chipset = init->chipset;
- dev_priv->frontOffset = init->frontOffset;
- dev_priv->backOffset = init->backOffset;
- dev_priv->depthOffset = init->depthOffset;
- dev_priv->textureOffset = init->textureOffset;
- dev_priv->textureSize = init->textureSize;
- dev_priv->cpp = init->cpp;
- dev_priv->sgram = init->sgram;
- dev_priv->stride = init->stride;
-
- dev_priv->mAccess = init->mAccess;
- dev_priv->flush_queue = 0;
- dev_priv->WarpPipe = 0xff000000;
- dev_priv->vertexsize = 0;
-
- DRM_DEBUG("chipset: %d ucode_size: %d backOffset: %x depthOffset: %x\n",
- dev_priv->chipset, dev_priv->warp_ucode_size,
- dev_priv->backOffset, dev_priv->depthOffset);
- DRM_DEBUG("cpp: %d sgram: %d stride: %d maccess: %x\n",
- dev_priv->cpp, dev_priv->sgram, dev_priv->stride,
- dev_priv->mAccess);
-
- memcpy(&dev_priv->WarpIndex, &init->WarpIndex,
- sizeof(drm_mga_warp_index_t) * MGA_MAX_WARP_PIPES);
-
- if(mga_init_primary_bufs(dev, init) != 0) {
- DRM_ERROR("Can not initialize primary buffers\n");
- mga_dma_cleanup(dev);
- return ENOMEM;
+
+ dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
+
+ if ( init->sgram ) {
+ dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
+ } else {
+ dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
}
- dev_priv->real_status_page = mga_alloc_page(dev);
- if(dev_priv->real_status_page == 0UL) {
- mga_dma_cleanup(dev);
- DRM_ERROR("Can not allocate status page\n");
- return ENOMEM;
+ dev_priv->maccess = init->maccess;
+
+ dev_priv->fb_cpp = init->fb_cpp;
+ dev_priv->front_offset = init->front_offset;
+ dev_priv->front_pitch = init->front_pitch;
+ dev_priv->back_offset = init->back_offset;
+ dev_priv->back_pitch = init->back_pitch;
+
+ dev_priv->depth_cpp = init->depth_cpp;
+ dev_priv->depth_offset = init->depth_offset;
+ dev_priv->depth_pitch = init->depth_pitch;
+
+ /* FIXME: Need to support AGP textures...
+ */
+ dev_priv->texture_offset = init->texture_offset[0];
+ dev_priv->texture_size = init->texture_size[0];
+
+ TAILQ_FOREACH(listentry, dev->maplist, link) {
+ drm_map_t *map = listentry->map;
+ if (map->type == _DRM_SHM &&
+ map->flags & _DRM_CONTAINS_LOCK) {
+ dev_priv->sarea = map;
+ break;
+ }
}
- dev_priv->status_page = (void*)dev_priv->real_status_page; /* XXX wants nocache */
+ if(!dev_priv->sarea) {
+ DRM_ERROR( "failed to find sarea!\n" );
+ /* Assign dev_private so we can do cleanup. */
+ dev->dev_private = (void *)dev_priv;
+ mga_do_cleanup_dma( dev );
+ DRM_OS_RETURN(EINVAL);
+ }
+
+
+ DRM_FIND_MAP( dev_priv->fb, init->fb_offset );
+ if(!dev_priv->fb) {
+ DRM_ERROR( "failed to find framebuffer!\n" );
+ /* Assign dev_private so we can do cleanup. */
+ dev->dev_private = (void *)dev_priv;
+ mga_do_cleanup_dma( dev );
+ DRM_OS_RETURN(EINVAL);
+ }
+ DRM_FIND_MAP( dev_priv->mmio, init->mmio_offset );
+ if(!dev_priv->mmio) {
+ DRM_ERROR( "failed to find mmio region!\n" );
+ /* Assign dev_private so we can do cleanup. */
+ dev->dev_private = (void *)dev_priv;
+ mga_do_cleanup_dma( dev );
+ DRM_OS_RETURN(EINVAL);
+ }
+ DRM_FIND_MAP( dev_priv->status, init->status_offset );
+ if(!dev_priv->status) {
+ DRM_ERROR( "failed to find status page!\n" );
+ /* Assign dev_private so we can do cleanup. */
+ dev->dev_private = (void *)dev_priv;
+ mga_do_cleanup_dma( dev );
+ DRM_OS_RETURN(EINVAL);
+ }
+ DRM_FIND_MAP( dev_priv->warp, init->warp_offset );
+ if(!dev_priv->warp) {
+ DRM_ERROR( "failed to find warp microcode region!\n" );
+ /* Assign dev_private so we can do cleanup. */
+ dev->dev_private = (void *)dev_priv;
+ mga_do_cleanup_dma( dev );
+ DRM_OS_RETURN(EINVAL);
+ }
+ DRM_FIND_MAP( dev_priv->primary, init->primary_offset );
+ if(!dev_priv->primary) {
+ DRM_ERROR( "failed to find primary dma region!\n" );
+ /* Assign dev_private so we can do cleanup. */
+ dev->dev_private = (void *)dev_priv;
+ mga_do_cleanup_dma( dev );
+ DRM_OS_RETURN(EINVAL);
+ }
+ DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );
+ if(!dev_priv->buffers) {
+ DRM_ERROR( "failed to find dma buffer region!\n" );
+ /* Assign dev_private so we can do cleanup. */
+ dev->dev_private = (void *)dev_priv;
+ mga_do_cleanup_dma( dev );
+ DRM_OS_RETURN(EINVAL);
+ }
+
+ dev_priv->sarea_priv =
+ (drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle +
+ init->sarea_priv_offset);
+
+ DRM_IOREMAP( dev_priv->warp );
+ DRM_IOREMAP( dev_priv->primary );
+ DRM_IOREMAP( dev_priv->buffers );
+
+ if(!dev_priv->warp->handle ||
+ !dev_priv->primary->handle ||
+ !dev_priv->buffers->handle ) {
+ DRM_ERROR( "failed to ioremap agp regions!\n" );
+ /* Assign dev_private so we can do cleanup. */
+ dev->dev_private = (void *)dev_priv;
+ mga_do_cleanup_dma( dev );
+ DRM_OS_RETURN(ENOMEM);
+ }
+
+ ret = mga_warp_install_microcode( dev_priv );
+ if ( ret < 0 ) {
+ DRM_ERROR( "failed to install WARP ucode!\n" );
+ /* Assign dev_private so we can do cleanup. */
+ dev->dev_private = (void *)dev_priv;
+ mga_do_cleanup_dma( dev );
+ DRM_OS_RETURN(ret);
+ }
+
+ ret = mga_warp_init( dev_priv );
+ if ( ret < 0 ) {
+ DRM_ERROR( "failed to init WARP engine!\n" );
+ /* Assign dev_private so we can do cleanup. */
+ dev->dev_private = (void *)dev_priv;
+ mga_do_cleanup_dma( dev );
+ DRM_OS_RETURN(ret);
+ }
+
+ dev_priv->prim.status = (u32 *)dev_priv->status->handle;
+
+ mga_do_wait_for_idle( dev_priv );
+
+ /* Init the primary DMA registers.
+ */
+ MGA_WRITE( MGA_PRIMADDRESS,
+ dev_priv->primary->offset | MGA_DMA_GENERAL );
#if 0
- dev_priv->status_page =
- ioremap_nocache(virt_to_bus((void *)dev_priv->real_status_page),
- PAGE_SIZE);
-
- if(dev_priv->status_page == NULL) {
- mga_dma_cleanup(dev);
- DRM_ERROR("Can not remap status page\n");
- return ENOMEM;
- }
+ MGA_WRITE( MGA_PRIMPTR,
+ virt_to_bus((void *)dev_priv->prim.status) |
+ MGA_PRIMPTREN0 | /* Soft trap, SECEND, SETUPEND */
+ MGA_PRIMPTREN1 ); /* DWGSYNC */
#endif
- /* Write status page when secend or softrap occurs */
- MGA_WRITE(MGAREG_PRIMPTR,
- vtophys((void *)dev_priv->real_status_page) | 0x00000003);
-
-
- /* Private is now filled in, initialize the hardware */
- {
- PRIMLOCALS;
- PRIMGETPTR( dev_priv );
-
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DWGSYNC, 0x0100);
- PRIMOUTREG(MGAREG_SOFTRAP, 0);
- /* Poll for the first buffer to insure that
- * the status register will be correct
- */
-
- mga_flush_write_combine();
- MGA_WRITE(MGAREG_PRIMADDRESS, phys_head | TT_GENERAL);
-
- MGA_WRITE(MGAREG_PRIMEND, ((phys_head + num_dwords * 4) |
- PDEA_pagpxfer_enable));
-
- while(MGA_READ(MGAREG_DWGSYNC) != 0x0100) ;
+ dev_priv->prim.start = (u8 *)dev_priv->primary->handle;
+ dev_priv->prim.end = ((u8 *)dev_priv->primary->handle
+ + dev_priv->primary->size);
+ dev_priv->prim.size = dev_priv->primary->size;
+
+ dev_priv->prim.tail = 0;
+ dev_priv->prim.space = dev_priv->prim.size;
+ dev_priv->prim.wrapped = 0;
+
+ dev_priv->prim.last_flush = 0;
+ dev_priv->prim.last_wrap = 0;
+
+ dev_priv->prim.high_mark = 256 * DMA_BLOCK_SIZE;
+
+
+ dev_priv->prim.status[0] = dev_priv->primary->offset;
+ dev_priv->prim.status[1] = 0;
+
+ dev_priv->sarea_priv->last_wrap = 0;
+ dev_priv->sarea_priv->last_frame.head = 0;
+ dev_priv->sarea_priv->last_frame.wrap = 0;
+
+ if ( mga_freelist_init( dev, dev_priv ) < 0 ) {
+ DRM_ERROR( "could not initialize freelist\n" );
+ /* Assign dev_private so we can do cleanup. */
+ dev->dev_private = (void *)dev_priv;
+ mga_do_cleanup_dma( dev );
+ DRM_OS_RETURN(ENOMEM);
}
- if(mga_freelist_init(dev) != 0) {
- DRM_ERROR("Could not initialize freelist\n");
- mga_dma_cleanup(dev);
- return ENOMEM;
+ /* Make dev_private visible to others. */
+ dev->dev_private = (void *)dev_priv;
+ return 0;
+}
+
+int mga_do_cleanup_dma( drm_device_t *dev )
+{
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
+
+ if ( dev->dev_private ) {
+ drm_mga_private_t *dev_priv = dev->dev_private;
+
+ DRM_IOREMAPFREE( dev_priv->warp );
+ DRM_IOREMAPFREE( dev_priv->primary );
+ DRM_IOREMAPFREE( dev_priv->buffers );
+
+ if ( dev_priv->head != NULL ) {
+ mga_freelist_cleanup( dev );
+ }
+
+ DRM(free)( dev->dev_private, sizeof(drm_mga_private_t),
+ DRM_MEM_DRIVER );
+ dev->dev_private = NULL;
}
+
return 0;
}
-int
-mga_dma_init(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+int mga_dma_init( DRM_OS_IOCTL )
{
- drm_device_t *dev = kdev->si_drv1;
+ DRM_OS_DEVICE;
drm_mga_init_t init;
-
- DRM_DEBUG("%s\n", __FUNCTION__);
- init = *(drm_mga_init_t *) data;
-
- switch(init.func) {
+ DRM_OS_KRNFROMUSR( init, (drm_mga_init_t *) data, sizeof(init) );
+
+ switch ( init.func ) {
case MGA_INIT_DMA:
- return mga_dma_initialize(dev, &init);
+ return mga_do_init_dma( dev, &init );
case MGA_CLEANUP_DMA:
- return mga_dma_cleanup(dev);
+ return mga_do_cleanup_dma( dev );
}
- return EINVAL;
+ DRM_OS_RETURN( EINVAL );
}
-int mga_irq_install(drm_device_t *dev, int irq)
+
+/* ================================================================
+ * Primary DMA stream management
+ */
+
+int mga_dma_flush( DRM_OS_IOCTL )
{
- int rid;
- int retcode;
+ DRM_OS_DEVICE;
+ drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
+ drm_lock_t lock;
- if (!irq) return EINVAL;
-
- lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
- if (dev->irq) {
- lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
- return EBUSY;
- }
- lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
-
- DRM_DEBUG("install irq handler %d\n", irq);
-
- dev->context_flag = 0;
- dev->interrupt_flag = 0;
- dev->dma_flag = 0;
- dev->dma->next_buffer = NULL;
- dev->dma->next_queue = NULL;
- dev->dma->this_buffer = NULL;
- TASK_INIT(&dev->task, 0, mga_dma_task_queue, dev);
-
- /* Before installing handler */
- MGA_WRITE(MGAREG_IEN, 0);
- /* Install handler */
- rid = 0;
- dev->irq = bus_alloc_resource(dev->device, SYS_RES_IRQ, &rid,
- 0, ~0, 1, RF_SHAREABLE);
- if (!dev->irq)
- return ENOENT;
-
- retcode = bus_setup_intr(dev->device, dev->irq, INTR_TYPE_TTY,
- mga_dma_service, dev, &dev->irqh);
- if (retcode) {
- bus_release_resource(dev->device, SYS_RES_IRQ, 0, dev->irq);
- dev->irq = 0;
- return retcode;
- }
+ LOCK_TEST_WITH_RETURN( dev );
- /* After installing handler */
- MGA_WRITE(MGAREG_ICLEAR, 0x00000001);
- MGA_WRITE(MGAREG_IEN, 0x00000001);
- return 0;
-}
+ DRM_OS_KRNFROMUSR( lock, (drm_lock_t *) data, sizeof(lock) );
-int mga_irq_uninstall(drm_device_t *dev)
-{
- if (!dev->irq)
- return EINVAL;
-
- DRM_DEBUG("remove irq handler %ld\n", rman_get_start(dev->irq));
- MGA_WRITE(MGAREG_ICLEAR, 0x00000001);
- MGA_WRITE(MGAREG_IEN, 0);
+ DRM_DEBUG( "%s: %s%s%s\n",
+ __FUNCTION__,
+ (lock.flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
+ (lock.flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
+ (lock.flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "" );
- bus_teardown_intr(dev->device, dev->irq, dev->irqh);
- bus_release_resource(dev->device, SYS_RES_IRQ, 0, dev->irq);
- dev->irq = 0;
+ WRAP_WAIT_WITH_RETURN( dev_priv );
- return 0;
-}
+ if ( lock.flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL) ) {
+ mga_do_dma_flush( dev_priv );
+ }
-int mga_control(dev_t kdev, u_long cmd, caddr_t data,
- int flags, struct proc *p)
-{
- drm_device_t *dev = kdev->si_drv1;
- drm_control_t ctl;
-
- ctl = *(drm_control_t *) data;
-
- DRM_DEBUG("%s\n", __FUNCTION__);
-
- switch (ctl.func) {
- case DRM_INST_HANDLER:
- return mga_irq_install(dev, ctl.irq);
- case DRM_UNINST_HANDLER:
- return mga_irq_uninstall(dev);
- default:
- return EINVAL;
+ if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
+#if MGA_DMA_DEBUG
+ int ret = mga_do_wait_for_idle( dev_priv );
+ if ( ret )
+ DRM_INFO( __FUNCTION__": -EBUSY\n" );
+ return ret;
+#else
+ return mga_do_wait_for_idle( dev_priv );
+#endif
+ } else {
+ return 0;
}
}
-static int mga_flush_queue(drm_device_t *dev)
+int mga_dma_reset( DRM_OS_IOCTL )
{
- drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
- int ret = 0;
- int s;
-
- DRM_DEBUG("%s\n", __FUNCTION__);
-
- if(!dev_priv) return 0;
-
- if(dev_priv->next_prim->num_dwords != 0) {
- s = splsofttq();
- set_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status);
- for (;;) {
- mga_dma_schedule(dev, 0);
- if (!test_bit(MGA_IN_FLUSH,
- &dev_priv->dispatch_status))
- break;
- atomic_inc(&dev->total_sleeps);
- ret = tsleep(&dev_priv->flush_queue, PZERO|PCATCH,
- "mgafq", 0);
- if (ret) {
- clear_bit(MGA_IN_FLUSH,
- &dev_priv->dispatch_status);
- break;
- }
- }
- splx(s);
- }
- return ret;
+ DRM_OS_DEVICE;
+ drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
+
+ LOCK_TEST_WITH_RETURN( dev );
+
+ return mga_do_dma_reset( dev_priv );
}
-/* Must be called with the lock held */
-void mga_reclaim_buffers(drm_device_t *dev, pid_t pid)
+
+/* ================================================================
+ * DMA buffer management
+ */
+
+#if 0
+static int mga_dma_get_buffers( drm_device_t *dev, drm_dma_t *d )
{
- drm_device_dma_t *dma = dev->dma;
- int i;
+ drm_buf_t *buf;
+ int i;
- if (!dma) return;
- if(dev->dev_private == NULL) return;
- if(dma->buflist == NULL) return;
+ for ( i = d->granted_count ; i < d->request_count ; i++ ) {
+ buf = mga_freelist_get( dev );
+ if ( !buf )
+ DRM_OS_RETURN( EAGAIN );
- DRM_DEBUG("%s\n", __FUNCTION__);
- mga_flush_queue(dev);
+ buf->pid = current->pid;
- for (i = 0; i < dma->buf_count; i++) {
- drm_buf_t *buf = dma->buflist[ i ];
- drm_mga_buf_priv_t *buf_priv = buf->dev_private;
+ if ( DRM_OS_COPYTOUSR( &d->request_indices[i],
+ &buf->idx, sizeof(buf->idx) ) )
+ DRM_OS_RETURN( EFAULT );
+ if ( DRM_OS_COPYTOUSR( &d->request_sizes[i],
+ &buf->total, sizeof(buf->total) ) )
+ DRM_OS_RETURN( EFAULT );
- /* Only buffers that need to get reclaimed ever
- * get set to free
- */
- if (buf->pid == pid && buf_priv) {
- if(buf_priv->my_freelist->age == MGA_BUF_USED)
- buf_priv->my_freelist->age = MGA_BUF_FREE;
- }
+ d->granted_count++;
}
+ return 0;
}
+#endif /* 0 */
-int mga_lock(dev_t kdev, u_long cmd, caddr_t data,
- int flags, struct proc *p)
+int mga_dma_buffers( DRM_OS_IOCTL )
{
- drm_device_t *dev = kdev->si_drv1;
- int ret = 0;
- drm_lock_t lock;
+ DRM_OS_DEVICE;
+ drm_device_dma_t *dma = dev->dma;
+ drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
+ drm_dma_t d;
+ drm_buf_t *buf;
+ int i;
+ int ret = 0;
- DRM_DEBUG("%s\n", __FUNCTION__);
- lock = *(drm_lock_t *) data;
+ LOCK_TEST_WITH_RETURN( dev );
- if (lock.context == DRM_KERNEL_CONTEXT) {
- DRM_ERROR("Process %d using kernel context %d\n",
- p->p_pid, lock.context);
- return EINVAL;
- }
-
- DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
- lock.context, p->p_pid, dev->lock.hw_lock->lock,
- lock.flags);
+ DRM_OS_KRNFROMUSR( d, (drm_dma_t *) data, sizeof(d) );
- if (lock.context < 0) {
- return EINVAL;
- }
-
- /* Only one queue:
+ /* Please don't send us buffers.
*/
-
- if (!ret) {
- atomic_inc(&dev->lock.lock_queue);
- for (;;) {
- if (!dev->lock.hw_lock) {
- /* Device has been unregistered */
- ret = EINTR;
- break;
- }
- if (drm_lock_take(&dev->lock.hw_lock->lock,
- lock.context)) {
- dev->lock.pid = p->p_pid;
- dev->lock.lock_time = ticks;
- atomic_inc(&dev->total_locks);
- break; /* Got lock */
- }
-
- /* Contention */
- atomic_inc(&dev->total_sleeps);
- ret = tsleep(&dev->lock.lock_queue, PZERO|PCATCH,
- "mgal2", 0);
- if (ret)
- break;
- }
- atomic_dec(&dev->lock.lock_queue);
+ if ( d.send_count != 0 ) {
+ DRM_ERROR( "Process %d trying to send %d buffers via drmDMA\n",
+ DRM_OS_CURRENTPID, d.send_count );
+ DRM_OS_RETURN( EINVAL );
}
-
- if (!ret) {
- if (lock.flags & _DRM_LOCK_QUIESCENT) {
- DRM_DEBUG("_DRM_LOCK_QUIESCENT\n");
- mga_flush_queue(dev);
- mga_dma_quiescent(dev);
- }
+
+ /* We'll send you buffers.
+ */
+ if ( d.request_count < 0 || d.request_count > dma->buf_count ) {
+ DRM_ERROR( "Process %d trying to get %d buffers (of %d max)\n",
+ DRM_OS_CURRENTPID, d.request_count, dma->buf_count );
+ DRM_OS_RETURN( EINVAL );
}
-
- if (ret) DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
- return ret;
-}
-
-int mga_flush_ioctl(dev_t kdev, u_long cmd, caddr_t data,
- int flags, struct proc *p)
-{
- drm_device_t *dev = kdev->si_drv1;
- drm_lock_t lock;
- drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
- int s;
- DRM_DEBUG("%s\n", __FUNCTION__);
- lock = *(drm_lock_t *) data;
+ WRAP_TEST_WITH_RETURN( dev_priv );
- if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
- DRM_ERROR("mga_flush_ioctl called without lock held\n");
- return EINVAL;
- }
+ d.granted_count = 0;
- if(lock.flags & _DRM_LOCK_FLUSH || lock.flags & _DRM_LOCK_FLUSH_ALL) {
- drm_mga_prim_buf_t *temp_buf;
- temp_buf = dev_priv->current_prim;
-
- s = splsofttq();
- if(temp_buf && temp_buf->num_dwords) {
- set_bit(MGA_BUF_FORCE_FIRE, &temp_buf->buffer_status);
- mga_advance_primary(dev);
- }
- mga_dma_schedule(dev, 1);
- splx(s);
- }
- if(lock.flags & _DRM_LOCK_QUIESCENT) {
- mga_flush_queue(dev);
- mga_dma_quiescent(dev);
+ if ( d.request_count ) {
+ for ( i = d.granted_count ; i < d.request_count ; i++ ) {
+ buf = mga_freelist_get( dev );
+ if ( !buf )
+ DRM_OS_RETURN( EAGAIN );
+
+ buf->pid = DRM_OS_CURRENTPID;
+
+ if ( DRM_OS_COPYTOUSR( &d.request_indices[i],
+ &buf->idx, sizeof(buf->idx) ) )
+ DRM_OS_RETURN( EFAULT );
+ if ( DRM_OS_COPYTOUSR( &d.request_sizes[i],
+ &buf->total, sizeof(buf->total) ) )
+ DRM_OS_RETURN( EFAULT );
+
+ d.granted_count++;
+ }
+ ret = 0;
}
- return 0;
+ DRM_OS_KRNTOUSR( (drm_dma_t *) data, d, sizeof(d) );
+
+ return ret;
}
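
mga_dma_buffers() services the generic DRM_IOCTL_DMA request: sends are rejected, and for each buffer pulled off the freelist the kernel writes its index and size back into the caller's request_indices/request_sizes arrays and bumps granted_count. The fragment below is a hedged sketch of how a userspace client could exercise this path; the include path, the buffer count, and the assumption that the caller already holds the hardware lock are illustrative, and real clients would normally go through libdrm rather than a raw ioctl().

/* Hypothetical userspace sketch of the buffer-request path handled by
 * mga_dma_buffers() above.  Assumes an open, authenticated DRM fd whose
 * owner currently holds the hardware lock; error handling is minimal.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"		/* drm_dma_t, DRM_IOCTL_DMA */

#define REQ_BUFS 4		/* made-up request size */

int request_dma_buffers(int fd, int context)
{
	int indices[REQ_BUFS], sizes[REQ_BUFS];
	drm_dma_t d;

	memset(&d, 0, sizeof(d));
	d.context         = context;	/* the client's DRM context */
	d.send_count      = 0;		/* the driver refuses sends outright */
	d.request_count   = REQ_BUFS;
	d.request_indices = indices;
	d.request_sizes   = sizes;

	if (ioctl(fd, DRM_IOCTL_DMA, &d) != 0) {
		perror("DRM_IOCTL_DMA");
		return -1;
	}

	printf("granted %d buffers, first idx=%d size=%d\n",
	       d.granted_count,
	       d.granted_count ? indices[0] : -1,
	       d.granted_count ? sizes[0] : -1);
	return d.granted_count;
}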
diff --git a/bsd/mga/mga_drv.c b/bsd/mga/mga_drv.c
index 77d7c440..206a77dc 100644
--- a/bsd/mga/mga_drv.c
+++ b/bsd/mga/mga_drv.c
@@ -1,6 +1,6 @@
-/* mga_drv.c -- Matrox g200/g400 driver
+/* mga_drv.c -- Matrox G200/G400 driver -*- linux-c -*-
* Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
- *
+ *
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@@ -11,704 +11,88 @@
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
- *
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
*/
-#include "drmP.h"
-#include "mga_drv.h"
+#include <sys/types.h>
+#include <sys/bus.h>
#include <pci/pcivar.h>
+#include <opt_drm_linux.h>
-MODULE_DEPEND(mga, drm, 1, 1, 1);
-MODULE_DEPEND(mga, agp, 1, 1, 1);
-
-#define MGA_NAME "mga"
-#define MGA_DESC "Matrox g200/g400"
-#define MGA_DATE "20000928"
-#define MGA_MAJOR 2
-#define MGA_MINOR 0
-#define MGA_PATCHLEVEL 0
-
-drm_ctx_t mga_res_ctx;
-
-static int mga_probe(device_t dev)
-{
- const char *s = 0;
-
- switch (pci_get_devid(dev)) {
- case 0x0525102b:
- s = "Matrox MGA G400 AGP graphics accelerator";
- break;
-
- case 0x0521102b:
- s = "Matrox MGA G200 AGP graphics accelerator";
- break;
- }
-
- if (s) {
- device_set_desc(dev, s);
- return 0;
- }
-
- return ENXIO;
-}
-
-static int mga_attach(device_t dev)
-{
- return mga_init(dev);
-}
-
-static int mga_detach(device_t dev)
-{
- mga_cleanup(dev);
- return 0;
-}
-
-static device_method_t mga_methods[] = {
- /* Device interface */
- DEVMETHOD(device_probe, mga_probe),
- DEVMETHOD(device_attach, mga_attach),
- DEVMETHOD(device_detach, mga_detach),
-
- { 0, 0 }
-};
-
-static driver_t mga_driver = {
- "drm",
- mga_methods,
- sizeof(drm_device_t),
-};
-
-static devclass_t mga_devclass;
-#define MGA_SOFTC(unit) \
- ((drm_device_t *) devclass_get_softc(mga_devclass, unit))
-
-DRIVER_MODULE(if_mga, pci, mga_driver, mga_devclass, 0, 0);
-
-#define CDEV_MAJOR 145
- /* mga_drv.c */
-static struct cdevsw mga_cdevsw = {
- /* open */ mga_open,
- /* close */ mga_close,
- /* read */ drm_read,
- /* write */ drm_write,
- /* ioctl */ mga_ioctl,
- /* poll */ drm_poll,
- /* mmap */ drm_mmap,
- /* strategy */ nostrategy,
- /* name */ "mga",
- /* maj */ CDEV_MAJOR,
- /* dump */ nodump,
- /* psize */ nopsize,
- /* flags */ D_TTY | D_TRACKCLOSE,
- /* bmaj */ -1
-};
-
-static drm_ioctl_desc_t mga_ioctls[] = {
- [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { mga_version, 0, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
-
- [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { mga_control, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { mga_addbufs, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { mga_markbufs, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { mga_infobufs, 1, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { mga_mapbufs, 1, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { mga_freebufs, 1, 0 },
+#include "mga.h"
+#include "drmP.h"
+#include "mga_drv.h"
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { mga_addctx, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { mga_rmctx, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { mga_modctx, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { mga_getctx, 1, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { mga_switchctx, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { mga_newctx, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { mga_resctx, 1, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
+#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc."
- [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { mga_dma, 1, 0 },
+#define DRIVER_NAME "mga"
+#define DRIVER_DESC "Matrox G200/G400"
+#define DRIVER_DATE "20010321"
- [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { mga_lock, 1, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { mga_unlock, 1, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
+#define DRIVER_MAJOR 3
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 2
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_MGA_INIT)] = { mga_dma_init, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_MGA_SWAP)] = { mga_swap_bufs, 1, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_MGA_CLEAR)] = { mga_clear_bufs, 1, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_MGA_ILOAD)] = { mga_iload, 1, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_MGA_VERTEX)] = { mga_vertex, 1, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_MGA_FLUSH)] = { mga_flush_ioctl, 1, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_MGA_INDICES)] = { mga_indices, 1, 0 },
+/* List acquired from http://www.yourvote.com/pci/pcihdr.h and xc/xc/programs/Xserver/hw/xfree86/common/xf86PciInfo.h
+ * Please report inaccuracies to anholt@teleport.com, or report any chip marked unsupported here that actually works for you.
+ */
+drm_chipinfo_t DRM(devicelist)[] = {
+ {0x102b, 0x0520, 0, "Matrox G200 (PCI)"},
+ {0x102b, 0x0521, 1, "Matrox G200 (AGP)"},
+ {0x102b, 0x0525, 1, "Matrox G400 (AGP)"},
+ {0, 0, 0, NULL}
};
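
The DRM(devicelist) table replaces the hand-rolled mga_probe() removed above; the shared DRM probe template is expected to walk this table and match devices by PCI vendor and device id, attaching only to supported entries. Below is a hypothetical sketch of such a lookup, with the struct and table redeclared locally purely for illustration.

/* Illustrative lookup over a devicelist-style table.  find_chip() and the
 * local struct/table copies are hypothetical; the real matching is done
 * by the shared probe code, not by this driver file.
 */
#include <stdio.h>

typedef struct drm_chipinfo {
	int vendor;
	int device;
	int supported;
	char *name;
} drm_chipinfo_t;

static drm_chipinfo_t devicelist[] = {
	{0x102b, 0x0520, 0, "Matrox G200 (PCI)"},
	{0x102b, 0x0521, 1, "Matrox G200 (AGP)"},
	{0x102b, 0x0525, 1, "Matrox G400 (AGP)"},
	{0, 0, 0, NULL}
};

static const char *find_chip(int vendor, int device)
{
	drm_chipinfo_t *chip;

	for (chip = devicelist; chip->vendor != 0; chip++) {
		if (chip->vendor == vendor && chip->device == device)
			return chip->supported ? chip->name : NULL;
	}
	return NULL;	/* unknown or unsupported device */
}

int main(void)
{
	const char *name = find_chip(0x102b, 0x0525);

	printf("%s\n", name ? name : "no supported match");
	return 0;
}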
-#define MGA_IOCTL_COUNT DRM_ARRAY_SIZE(mga_ioctls)
-
-static int mga_setup(drm_device_t *dev)
-{
- int i;
-
- device_busy(dev->device);
-
- atomic_set(&dev->ioctl_count, 0);
- atomic_set(&dev->vma_count, 0);
- dev->buf_use = 0;
- atomic_set(&dev->buf_alloc, 0);
-
- drm_dma_setup(dev);
-
- atomic_set(&dev->total_open, 0);
- atomic_set(&dev->total_close, 0);
- atomic_set(&dev->total_ioctl, 0);
- atomic_set(&dev->total_irq, 0);
- atomic_set(&dev->total_ctx, 0);
- atomic_set(&dev->total_locks, 0);
- atomic_set(&dev->total_unlocks, 0);
- atomic_set(&dev->total_contends, 0);
- atomic_set(&dev->total_sleeps, 0);
-
- for (i = 0; i < DRM_HASH_SIZE; i++) {
- dev->magiclist[i].head = NULL;
- dev->magiclist[i].tail = NULL;
- }
- dev->maplist = NULL;
- dev->map_count = 0;
- dev->vmalist = NULL;
- dev->lock.hw_lock = NULL;
- dev->lock.lock_queue = 0;
- dev->queue_count = 0;
- dev->queue_reserved = 0;
- dev->queue_slots = 0;
- dev->queuelist = NULL;
- dev->irq = 0;
- dev->context_flag = 0;
- dev->interrupt_flag = 0;
- dev->dma_flag = 0;
- dev->last_context = 0;
- dev->last_switch = 0;
- dev->last_checked = 0;
- callout_init(&dev->timer);
- dev->context_wait = 0;
-
- timespecclear(&dev->ctx_start);
- timespecclear(&dev->lck_start);
-
- dev->buf_rp = dev->buf;
- dev->buf_wp = dev->buf;
- dev->buf_end = dev->buf + DRM_BSZ;
- bzero(&dev->buf_sel, sizeof dev->buf_sel);
- dev->buf_sigio = NULL;
- dev->buf_readers = 0;
- dev->buf_writers = 0;
- dev->buf_selecting = 0;
-
- DRM_DEBUG("\n");
-
- /* The kernel's context could be created here, but is now created
- in drm_dma_enqueue. This is more resource-efficient for
- hardware that does not do DMA, but may mean that
- drm_select_queue fails between the time the interrupt is
- initialized and the time the queues are initialized. */
-
- return 0;
-}
-
-
-static int mga_takedown(drm_device_t *dev)
-{
- int i;
- drm_magic_entry_t *pt, *next;
- drm_map_t *map;
- drm_vma_entry_t *vma, *vma_next;
-
- DRM_DEBUG("\n");
-
- if (dev->irq) mga_irq_uninstall(dev);
-
- lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
- callout_stop(&dev->timer);
-
- if (dev->devname) {
- drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
- dev->devname = NULL;
- }
-
- if (dev->unique) {
- drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
- dev->unique = NULL;
- dev->unique_len = 0;
- }
- /* Clear pid list */
- for (i = 0; i < DRM_HASH_SIZE; i++) {
- for (pt = dev->magiclist[i].head; pt; pt = next) {
- next = pt->next;
- drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
- }
- dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
- }
- /* Clear AGP information */
- if (dev->agp) {
- drm_agp_mem_t *entry;
- drm_agp_mem_t *nexte;
-
- /* Remove AGP resources, but leave dev->agp
- intact until cleanup is called. */
- for (entry = dev->agp->memory; entry; entry = nexte) {
- nexte = entry->next;
- if (entry->bound) drm_unbind_agp(entry->handle);
- drm_free_agp(entry->handle, entry->pages);
- drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
- }
- dev->agp->memory = NULL;
-
- if (dev->agp->acquired)
- agp_release(dev->agp->agpdev);
-
- dev->agp->acquired = 0;
- dev->agp->enabled = 0;
- }
- /* Clear vma list (only built for debugging) */
- if (dev->vmalist) {
- for (vma = dev->vmalist; vma; vma = vma_next) {
- vma_next = vma->next;
- drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
- }
- dev->vmalist = NULL;
- }
-
- /* Clear map area and mtrr information */
- if (dev->maplist) {
- for (i = 0; i < dev->map_count; i++) {
- map = dev->maplist[i];
- switch (map->type) {
- case _DRM_REGISTERS:
- case _DRM_FRAME_BUFFER:
-#ifdef CONFIG_MTRR
- if (map->mtrr >= 0) {
- int retcode;
- retcode = mtrr_del(map->mtrr,
- map->offset,
- map->size);
- DRM_DEBUG("mtrr_del = %d\n", retcode);
- }
-#endif
- drm_ioremapfree(map->handle, map->size);
- break;
- case _DRM_SHM:
- drm_free_pages((unsigned long)map->handle,
- drm_order(map->size)
- - PAGE_SHIFT,
- DRM_MEM_SAREA);
- break;
- case _DRM_AGP:
- break;
- }
- drm_free(map, sizeof(*map), DRM_MEM_MAPS);
- }
- drm_free(dev->maplist,
- dev->map_count * sizeof(*dev->maplist),
- DRM_MEM_MAPS);
- dev->maplist = NULL;
- dev->map_count = 0;
- }
-
- if (dev->queuelist) {
- for (i = 0; i < dev->queue_count; i++) {
- drm_waitlist_destroy(&dev->queuelist[i]->waitlist);
- if (dev->queuelist[i]) {
- drm_free(dev->queuelist[i],
- sizeof(*dev->queuelist[0]),
- DRM_MEM_QUEUES);
- dev->queuelist[i] = NULL;
- }
- }
- drm_free(dev->queuelist,
- dev->queue_slots * sizeof(*dev->queuelist),
- DRM_MEM_QUEUES);
- dev->queuelist = NULL;
- }
-
- drm_dma_takedown(dev);
-
- dev->queue_count = 0;
- if (dev->lock.hw_lock) {
- dev->lock.hw_lock = NULL; /* SHM removed */
- dev->lock.pid = 0;
- wakeup(&dev->lock.lock_queue);
- }
- lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
-
- return 0;
-}
-
-/* mga_init is called via mga_attach at module load time, */
-
-int
-mga_init(device_t nbdev)
-{
- int retcode;
- drm_device_t *dev = device_get_softc(nbdev);
-
- DRM_DEBUG("\n");
-
- memset((void *)dev, 0, sizeof(*dev));
- simple_lock_init(&dev->count_lock);
- lockinit(&dev->dev_lock, PZERO, "drmlk", 0, 0);
-
-#if 0
- drm_parse_options(mga);
-#endif
- dev->device = nbdev;
- dev->devnode = make_dev(&mga_cdevsw,
- device_get_unit(nbdev),
- DRM_DEV_UID,
- DRM_DEV_GID,
- DRM_DEV_MODE,
- MGA_NAME);
- dev->name = MGA_NAME;
-
- DRM_DEBUG("doing mem init\n");
- drm_mem_init();
- DRM_DEBUG("doing proc init\n");
- drm_sysctl_init(dev);
- TAILQ_INIT(&dev->files);
- DRM_DEBUG("doing agp init\n");
- dev->agp = drm_agp_init();
- if(dev->agp == NULL) {
- DRM_INFO("The mga drm module requires the agp module"
- " to function correctly\nPlease load the agp"
- " module before you load the mga module\n");
- drm_sysctl_cleanup(dev);
- mga_takedown(dev);
- return ENOMEM;
- }
-#if 0
- dev->agp->agp_mtrr = mtrr_add(dev->agp->agp_info.aper_base,
- dev->agp->agp_info.aper_size * 1024 * 1024,
- MTRR_TYPE_WRCOMB,
- 1);
-#endif
- DRM_DEBUG("doing ctxbitmap init\n");
- if((retcode = drm_ctxbitmap_init(dev))) {
- DRM_ERROR("Cannot allocate memory for context bitmap.\n");
- drm_sysctl_cleanup(dev);
- mga_takedown(dev);
- return retcode;
- }
-
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
- MGA_NAME,
- MGA_MAJOR,
- MGA_MINOR,
- MGA_PATCHLEVEL,
- MGA_DATE,
- device_get_unit(nbdev));
-
- return 0;
-}
-
-/* mga_cleanup is called via cleanup_module at module unload time. */
-
-void mga_cleanup(device_t nbdev)
-{
- drm_device_t *dev = device_get_softc(nbdev);
-
- DRM_DEBUG("\n");
-
- drm_sysctl_cleanup(dev);
- destroy_dev(dev->devnode);
-
- DRM_INFO("Module unloaded\n");
- drm_ctxbitmap_cleanup(dev);
- mga_dma_cleanup(dev);
-#if 0
- if(dev->agp && dev->agp->agp_mtrr) {
- int retval;
- retval = mtrr_del(dev->agp->agp_mtrr,
- dev->agp->agp_info.aper_base,
- dev->agp->agp_info.aper_size * 1024*1024);
- DRM_DEBUG("mtrr_del = %d\n", retval);
- }
-#endif
-
- mga_takedown(dev);
- if (dev->agp) {
- drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
- dev->agp = NULL;
- }
-}
-
-int
-mga_version(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
-{
- drm_version_t version;
- int len;
-
- version = *(drm_version_t *) data;
-
-#define DRM_COPY(name,value) \
- len = strlen(value); \
- if (len > name##_len) len = name##_len; \
- name##_len = strlen(value); \
- if (len && name) { \
- int error = copyout(value, name, len); \
- if (error) return error; \
- }
-
- version.version_major = MGA_MAJOR;
- version.version_minor = MGA_MINOR;
- version.version_patchlevel = MGA_PATCHLEVEL;
-
- DRM_COPY(version.name, MGA_NAME);
- DRM_COPY(version.date, MGA_DATE);
- DRM_COPY(version.desc, MGA_DESC);
-
- *(drm_version_t *) data = version;
- return 0;
-}
-
-int
-mga_open(dev_t kdev, int flags, int fmt, struct proc *p)
-{
- drm_device_t *dev = MGA_SOFTC(minor(kdev));
- int retcode = 0;
-
- DRM_DEBUG("open_count = %d\n", dev->open_count);
-
- device_busy(dev->device);
- if (!(retcode = drm_open_helper(kdev, flags, fmt, p, dev))) {
- atomic_inc(&dev->total_open);
- simple_lock(&dev->count_lock);
- if (!dev->open_count++) {
- simple_unlock(&dev->count_lock);
- retcode = mga_setup(dev);
- }
- simple_unlock(&dev->count_lock);
- }
- device_unbusy(dev->device);
-
- return retcode;
-}
-
-int
-mga_close(dev_t kdev, int flags, int fmt, struct proc *p)
-{
- drm_device_t *dev = kdev->si_drv1;
- drm_file_t *priv;
- int retcode = 0;
-
- DRM_DEBUG("pid = %d, open_count = %d\n",
- p->p_pid, dev->open_count);
-
- priv = drm_find_file_by_proc(dev, p);
- if (!priv) {
- DRM_DEBUG("can't find authenticator\n");
- return EINVAL;
- }
-
- if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
- && dev->lock.pid == p->p_pid) {
- mga_reclaim_buffers(dev, priv->pid);
- DRM_ERROR("Process %d dead, freeing lock for context %d\n",
- p->p_pid,
- _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
- drm_lock_free(dev,
- &dev->lock.hw_lock->lock,
- _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
-
- /* FIXME: may require heavy-handed reset of
- hardware at this point, possibly
- processed via a callback to the X
- server. */
- } else if (dev->lock.hw_lock) {
- /* The lock is required to reclaim buffers */
- for (;;) {
- if (!dev->lock.hw_lock) {
- /* Device has been unregistered */
- retcode = EINTR;
- break;
- }
- if (drm_lock_take(&dev->lock.hw_lock->lock,
- DRM_KERNEL_CONTEXT)) {
- dev->lock.pid = p->p_pid;
- dev->lock.lock_time = ticks;
- atomic_inc(&dev->total_locks);
- break; /* Got lock */
- }
- /* Contention */
- atomic_inc(&dev->total_sleeps);
- retcode = tsleep(&dev->lock.lock_queue,
- PZERO|PCATCH,
- "drmlk2",
- 0);
- if (retcode)
- break;
- }
- if(!retcode) {
- mga_reclaim_buffers(dev, priv->pid);
- drm_lock_free(dev, &dev->lock.hw_lock->lock,
- DRM_KERNEL_CONTEXT);
- }
- }
- funsetown(dev->buf_sigio);
-
- lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, p);
- priv = drm_find_file_by_proc(dev, p);
- if (priv) {
- priv->refs--;
- if (!priv->refs) {
- TAILQ_REMOVE(&dev->files, priv, link);
- drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
- }
- }
- lockmgr(&dev->dev_lock, LK_RELEASE, 0, p);
-
- atomic_inc(&dev->total_close);
- simple_lock(&dev->count_lock);
- if (!--dev->open_count) {
- if (atomic_read(&dev->ioctl_count) || dev->blocked) {
- DRM_ERROR("Device busy: %d %d\n",
- atomic_read(&dev->ioctl_count),
- dev->blocked);
- simple_unlock(&dev->count_lock);
- return EBUSY;
- }
- simple_unlock(&dev->count_lock);
- device_unbusy(dev->device);
- return mga_takedown(dev);
- }
- simple_unlock(&dev->count_lock);
- return retcode;
-}
-
-
-/* mga_ioctl is called whenever a process performs an ioctl on /dev/drm. */
-
-int
-mga_ioctl(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
-{
- int nr = DRM_IOCTL_NR(cmd);
- drm_device_t *dev = kdev->si_drv1;
- drm_file_t *priv;
- int retcode = 0;
- drm_ioctl_desc_t *ioctl;
- d_ioctl_t *func;
-
- DRM_DEBUG("dev=%p\n", dev);
- priv = drm_find_file_by_proc(dev, p);
- if (!priv) {
- DRM_DEBUG("can't find authenticator\n");
- return EINVAL;
- }
-
- atomic_inc(&dev->ioctl_count);
- atomic_inc(&dev->total_ioctl);
- ++priv->ioctl_count;
-
- DRM_DEBUG("pid = %d, cmd = 0x%02lx, nr = 0x%02x, auth = %d\n",
- p->p_pid, cmd, nr, priv->authenticated);
-
- switch (cmd) {
- case FIONBIO:
- atomic_dec(&dev->ioctl_count);
- return 0;
-
- case FIOASYNC:
- atomic_dec(&dev->ioctl_count);
- dev->flags |= FASYNC;
- return 0;
-
- case FIOSETOWN:
- atomic_dec(&dev->ioctl_count);
- return fsetown(*(int *)data, &dev->buf_sigio);
-
- case FIOGETOWN:
- atomic_dec(&dev->ioctl_count);
- *(int *) data = fgetown(dev->buf_sigio);
- return 0;
- }
-
- if (nr >= MGA_IOCTL_COUNT) {
- retcode = EINVAL;
- } else {
- ioctl = &mga_ioctls[nr];
- func = ioctl->func;
-
- if (!func) {
- DRM_DEBUG("no function\n");
- retcode = EINVAL;
- } else if ((ioctl->root_only && suser(p))
- || (ioctl->auth_needed && !priv->authenticated)) {
- retcode = EACCES;
- } else {
- retcode = (func)(kdev, cmd, data, flags, p);
- }
- }
-
- atomic_dec(&dev->ioctl_count);
- return retcode;
-}
-
-int
-mga_unlock(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
-{
- drm_device_t *dev = kdev->si_drv1;
- drm_lock_t lock;
- int s;
-
- lock = *(drm_lock_t *) data;
-
- if (lock.context == DRM_KERNEL_CONTEXT) {
- DRM_ERROR("Process %d using kernel context %d\n",
- p->p_pid, lock.context);
- return EINVAL;
- }
-
- DRM_DEBUG("%d frees lock (%d holds)\n",
- lock.context,
- _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
- atomic_inc(&dev->total_unlocks);
- if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
- atomic_inc(&dev->total_contends);
- drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
-
- s = splsofttq();
- mga_dma_schedule(dev, 1);
- splx(s);
-
- if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
- DRM_KERNEL_CONTEXT)) {
- DRM_ERROR("\n");
- }
-
- return 0;
-}
+#define DRIVER_IOCTLS \
+ [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { mga_dma_buffers, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_MGA_INIT)] = { mga_dma_init, 1, 1 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_MGA_FLUSH)] = { mga_dma_flush, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_MGA_RESET)] = { mga_dma_reset, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_MGA_SWAP)] = { mga_dma_swap, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_MGA_CLEAR)] = { mga_dma_clear, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_MGA_VERTEX)] = { mga_dma_vertex, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_MGA_INDICES)] = { mga_dma_indices, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_MGA_ILOAD)] = { mga_dma_iload, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_MGA_BLIT)] = { mga_dma_blit, 1, 0 },
+
+
+#define __HAVE_COUNTERS 3
+#define __HAVE_COUNTER6 _DRM_STAT_IRQ
+#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY
+#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY
+
+
+#include "drm_agpsupport.h"
+#include "drm_auth.h"
+#include "drm_bufs.h"
+#include "drm_context.h"
+#include "drm_dma.h"
+#include "drm_drawable.h"
+#include "drm_drv.h"
+
+
+#include "drm_fops.h"
+#include "drm_init.h"
+#include "drm_ioctl.h"
+#include "drm_lock.h"
+#include "drm_memory.h"
+#include "drm_vm.h"
+#include "drm_sysctl.h"
+
+DRIVER_MODULE(mga, pci, mga_driver, mga_devclass, 0, 0);
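The DRIVER_IOCTLS table above replaces the hand-rolled dispatch in the removed mga_ioctl(): DRM_IOCTL_NR(cmd) indexes a designated-initializer array, unlisted slots stay NULL, and each entry carries the handler plus its auth/root flags. Below is a minimal, self-contained sketch of that dispatch pattern; it is not part of the patch, and the type and helper names (ioctl_desc_t, dispatch_ioctl, IOCTL_FLUSH, ...) are invented for illustration rather than the real drmP.h declarations.

/*
 * Illustrative sketch only -- not part of this patch.  It mirrors the
 * table-driven ioctl dispatch that the removed mga_ioctl() performed:
 * the command number indexes a designated-initializer table, missing
 * entries stay NULL, and the auth/root flags gate the call.
 */
#include <errno.h>
#include <stdio.h>

#define IOCTL_NR(cmd)	((cmd) & 0xff)	/* stand-in for DRM_IOCTL_NR() */
#define IOCTL_FLUSH	0x43		/* hypothetical command number */
#define IOCTL_COUNT	256

typedef int (*ioctl_func_t)(void *data);

typedef struct {
	ioctl_func_t func;
	int auth_needed;
	int root_only;
} ioctl_desc_t;

static int flush_handler(void *data)
{
	(void)data;
	return 0;
}

/* Unlisted slots are zero-initialized, so func == NULL means "no such
 * ioctl", exactly the case the removed dispatch loop rejected. */
static const ioctl_desc_t ioctls[IOCTL_COUNT] = {
	[IOCTL_NR(IOCTL_FLUSH)] = { flush_handler, 1, 0 },
};

static int dispatch_ioctl(unsigned long cmd, void *data,
			  int authenticated, int is_root)
{
	unsigned int nr = IOCTL_NR(cmd);
	const ioctl_desc_t *d;

	if (nr >= IOCTL_COUNT)
		return EINVAL;
	d = &ioctls[nr];
	if (d->func == NULL)
		return EINVAL;
	if ((d->root_only && !is_root) ||
	    (d->auth_needed && !authenticated))
		return EACCES;
	return d->func(data);
}

int main(void)
{
	printf("flush -> %d\n", dispatch_ioctl(IOCTL_FLUSH, NULL, 1, 0));
	printf("unknown -> %d\n", dispatch_ioctl(0x7f, NULL, 1, 0));
	return 0;
}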
diff --git a/bsd/mga/mga_drv.h b/bsd/mga/mga_drv.h
index 9e51a20d..be207e30 100644
--- a/bsd/mga/mga_drv.h
+++ b/bsd/mga/mga_drv.h
@@ -1,4 +1,4 @@
-/* mga_drv.h -- Private header for the Matrox g200/g400 driver
+/* mga_drv.h -- Private header for the Matrox G200/G400 driver -*- linux-c -*-
* Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
@@ -11,163 +11,157 @@
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
*/
-#ifndef _MGA_DRV_H_
-#define _MGA_DRV_H_
+#ifndef __MGA_DRV_H__
+#define __MGA_DRV_H__
-#define MGA_BUF_IN_USE 0
-#define MGA_BUF_SWAP_PENDING 1
-#define MGA_BUF_FORCE_FIRE 2
-#define MGA_BUF_NEEDS_OVERFLOW 3
+#ifndef u8
+#define u8 u_int8_t
+#define u16 u_int16_t
+#define u32 u_int32_t
+#endif
-typedef struct {
- u_int32_t buffer_status;
- int num_dwords;
- int max_dwords;
- u_int32_t *current_dma_ptr;
- u_int32_t *head;
- u_int32_t phys_head;
- unsigned int prim_age;
- int sec_used;
- int idx;
-} drm_mga_prim_buf_t;
-
-typedef struct _drm_mga_freelist {
- __volatile__ unsigned int age;
+typedef struct drm_mga_primary_buffer {
+ u8 *start;
+ u8 *end;
+ int size;
+
+ u32 tail;
+ int space;
+ volatile long wrapped;
+
+ volatile u32 *status;
+
+ u32 last_flush;
+ u32 last_wrap;
+
+ u32 high_mark;
+
+ spinlock_t list_lock;
+} drm_mga_primary_buffer_t;
+
+typedef struct drm_mga_freelist {
+ struct drm_mga_freelist *next;
+ struct drm_mga_freelist *prev;
+ drm_mga_age_t age;
drm_buf_t *buf;
- struct _drm_mga_freelist *next;
- struct _drm_mga_freelist *prev;
} drm_mga_freelist_t;
-#define MGA_IN_DISPATCH 0
-#define MGA_IN_FLUSH 1
-#define MGA_IN_WAIT 2
-#define MGA_IN_GETBUF 3
-
-typedef struct _drm_mga_private {
- u_int32_t dispatch_status;
- unsigned int next_prim_age;
- __volatile__ unsigned int last_prim_age;
- int reserved_map_idx;
- int buffer_map_idx;
- drm_mga_sarea_t *sarea_priv;
- int primary_size;
- int warp_ucode_size;
- int chipset;
- unsigned int frontOffset;
- unsigned int backOffset;
- unsigned int depthOffset;
- unsigned int textureOffset;
- unsigned int textureSize;
- int cpp;
- unsigned int stride;
- int sgram;
- int use_agp;
- drm_mga_warp_index_t WarpIndex[MGA_MAX_G400_PIPES];
- unsigned int WarpPipe;
- unsigned int vertexsize;
- atomic_t pending_bufs;
- void *status_page;
- unsigned long real_status_page;
- u_int8_t *ioremap;
- drm_mga_prim_buf_t **prim_bufs;
- drm_mga_prim_buf_t *next_prim;
- drm_mga_prim_buf_t *last_prim;
- drm_mga_prim_buf_t *current_prim;
- int current_prim_idx;
+typedef struct {
+ drm_mga_freelist_t *list_entry;
+ int discard;
+ int dispatched;
+} drm_mga_buf_priv_t;
+
+typedef struct drm_mga_private {
+ drm_mga_primary_buffer_t prim;
+ drm_mga_sarea_t *sarea_priv;
+
drm_mga_freelist_t *head;
drm_mga_freelist_t *tail;
- int flush_queue; /* Processes waiting until flush */
- int wait_queue; /* Processes waiting until interrupt */
- int buf_queue; /* Processes waiting for a free buf */
- /* Some validated register values:
- */
- u_int32_t mAccess;
-} drm_mga_private_t;
- /* mga_drv.c */
-extern int mga_init(device_t);
-extern void mga_cleanup(device_t);
-extern d_ioctl_t mga_version;
-extern d_open_t mga_open;
-extern d_close_t mga_close;
-extern d_ioctl_t mga_ioctl;
-extern d_ioctl_t mga_unlock;
+ unsigned int warp_pipe;
+ unsigned long warp_pipe_phys[MGA_MAX_WARP_PIPES];
+
+ int chipset;
+ int usec_timeout;
+
+ u32 clear_cmd;
+ u32 maccess;
+
+ unsigned int fb_cpp;
+ unsigned int front_offset;
+ unsigned int front_pitch;
+ unsigned int back_offset;
+ unsigned int back_pitch;
+
+ unsigned int depth_cpp;
+ unsigned int depth_offset;
+ unsigned int depth_pitch;
+
+ unsigned int texture_offset;
+ unsigned int texture_size;
+
+ drm_map_t *sarea;
+ drm_map_t *fb;
+ drm_map_t *mmio;
+ drm_map_t *status;
+ drm_map_t *warp;
+ drm_map_t *primary;
+ drm_map_t *buffers;
+ drm_map_t *agp_textures;
+} drm_mga_private_t;
/* mga_dma.c */
-extern int mga_dma_schedule(drm_device_t *dev, int locked);
-extern int mga_irq_install(drm_device_t *dev, int irq);
-extern int mga_irq_uninstall(drm_device_t *dev);
-extern d_ioctl_t mga_dma;
-extern d_ioctl_t mga_control;
-extern d_ioctl_t mga_lock;
-
-/* mga_dma_init does init and release */
-extern int mga_dma_cleanup(drm_device_t *dev);
-extern d_ioctl_t mga_dma_init;
-extern d_ioctl_t mga_flush_ioctl;
-extern void mga_flush_write_combine(void);
-extern unsigned int mga_create_sync_tag(drm_device_t *dev);
-extern drm_buf_t *mga_freelist_get(drm_device_t *dev);
-extern int mga_freelist_put(drm_device_t *dev, drm_buf_t *buf);
-extern int mga_advance_primary(drm_device_t *dev);
-extern void mga_reclaim_buffers(drm_device_t *dev, pid_t pid);
-
- /* mga_bufs.c */
-extern d_ioctl_t mga_addbufs;
-extern d_ioctl_t mga_infobufs;
-extern d_ioctl_t mga_markbufs;
-extern d_ioctl_t mga_freebufs;
-extern d_ioctl_t mga_mapbufs;
-extern d_ioctl_t mga_addmap;
- /* mga_state.c */
-extern d_ioctl_t mga_clear_bufs;
-extern d_ioctl_t mga_swap_bufs;
-extern d_ioctl_t mga_iload;
-extern d_ioctl_t mga_vertex;
-extern d_ioctl_t mga_indices;
- /* mga_context.c */
-extern d_ioctl_t mga_resctx;
-extern d_ioctl_t mga_addctx;
-extern d_ioctl_t mga_modctx;
-extern d_ioctl_t mga_getctx;
-extern d_ioctl_t mga_switchctx;
-extern d_ioctl_t mga_newctx;
-extern d_ioctl_t mga_rmctx;
-
-extern int mga_context_switch(drm_device_t *dev, int old, int new);
-extern int mga_context_switch_complete(drm_device_t *dev, int new);
-
-
-typedef enum {
- TT_GENERAL,
- TT_BLIT,
- TT_VECTOR,
- TT_VERTEX
-} transferType_t;
+extern int mga_dma_init( DRM_OS_IOCTL );
+extern int mga_dma_flush( DRM_OS_IOCTL );
+extern int mga_dma_reset( DRM_OS_IOCTL );
+extern int mga_dma_buffers( DRM_OS_IOCTL );
-typedef struct {
- drm_mga_freelist_t *my_freelist;
- int discard;
- int dispatched;
-} drm_mga_buf_priv_t;
+extern int mga_do_wait_for_idle( drm_mga_private_t *dev_priv );
+extern int mga_do_dma_idle( drm_mga_private_t *dev_priv );
+extern int mga_do_dma_reset( drm_mga_private_t *dev_priv );
+extern int mga_do_engine_reset( drm_mga_private_t *dev_priv );
+extern int mga_do_cleanup_dma( drm_device_t *dev );
+
+extern void mga_do_dma_flush( drm_mga_private_t *dev_priv );
+extern void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv );
+extern void mga_do_dma_wrap_end( drm_mga_private_t *dev_priv );
+
+extern int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf );
+
+ /* mga_state.c */
+extern int mga_dma_clear( DRM_OS_IOCTL );
+extern int mga_dma_swap( DRM_OS_IOCTL );
+extern int mga_dma_vertex( DRM_OS_IOCTL );
+extern int mga_dma_indices( DRM_OS_IOCTL );
+extern int mga_dma_iload( DRM_OS_IOCTL );
+extern int mga_dma_blit( DRM_OS_IOCTL );
+
+ /* mga_warp.c */
+extern int mga_warp_install_microcode( drm_mga_private_t *dev_priv );
+extern int mga_warp_init( drm_mga_private_t *dev_priv );
+
+#define mga_flush_write_combine() DRM_OS_READMEMORYBARRIER
+
+#define MGA_BASE( reg ) ((unsigned long)(dev_priv->mmio->handle))
+#define MGA_ADDR( reg ) (MGA_BASE(reg) + reg)
+
+#define MGA_DEREF( reg ) *(volatile u32 *)MGA_ADDR( reg )
+#define MGA_DEREF8( reg ) *(volatile u8 *)MGA_ADDR( reg )
+
+#ifdef __alpha__
+#define MGA_READ( reg ) (_MGA_READ((u32 *)MGA_ADDR(reg)))
+#define MGA_WRITE( reg, val ) do { wmb(); MGA_DEREF( reg ) = val; } while (0)
+#define MGA_WRITE8( reg, val ) do { wmb(); MGA_DEREF8( reg ) = val; } while (0)
+
+static inline u32 _MGA_READ(u32 *addr)
+{
+ mb();
+ return *(volatile u32 *)addr;
+}
+
+#else
+#define MGA_READ( reg ) MGA_DEREF( reg )
+#define MGA_WRITE( reg, val ) do { MGA_DEREF( reg ) = val; } while (0)
+#define MGA_WRITE8( reg, val ) do { MGA_DEREF8( reg ) = val; } while (0)
+#endif
#define DWGREG0 0x1c00
#define DWGREG0_END 0x1dff
@@ -175,322 +169,470 @@ typedef struct {
#define DWGREG1_END 0x2dff
#define ISREG0(r) (r >= DWGREG0 && r <= DWGREG0_END)
-#define ADRINDEX0(r) (u_int8_t)((r - DWGREG0) >> 2)
-#define ADRINDEX1(r) (u_int8_t)(((r - DWGREG1) >> 2) | 0x80)
-#define ADRINDEX(r) (ISREG0(r) ? ADRINDEX0(r) : ADRINDEX1(r))
-
-#define MGA_VERBOSE 0
-#define MGA_NUM_PRIM_BUFS 8
-
-#define PRIMLOCALS u_int8_t tempIndex[4]; u_int32_t *dma_ptr; u_int32_t phys_head; \
- int outcount, num_dwords
-
-#define PRIM_OVERFLOW(dev, dev_priv, length) do { \
- drm_mga_prim_buf_t *tmp_buf = \
- dev_priv->prim_bufs[dev_priv->current_prim_idx]; \
- if( test_bit(MGA_BUF_NEEDS_OVERFLOW, \
- &tmp_buf->buffer_status)) { \
- mga_advance_primary(dev); \
- mga_dma_schedule(dev, 1); \
- tmp_buf = dev_priv->prim_bufs[dev_priv->current_prim_idx]; \
- } else if( tmp_buf->max_dwords - tmp_buf->num_dwords < length ||\
- tmp_buf->sec_used > MGA_DMA_BUF_NR/2) { \
- set_bit(MGA_BUF_FORCE_FIRE, &tmp_buf->buffer_status); \
- mga_advance_primary(dev); \
- mga_dma_schedule(dev, 1); \
- tmp_buf = dev_priv->prim_bufs[dev_priv->current_prim_idx]; \
+#define DMAREG0(r) (u8)((r - DWGREG0) >> 2)
+#define DMAREG1(r) (u8)(((r - DWGREG1) >> 2) | 0x80)
+#define DMAREG(r) (ISREG0(r) ? DMAREG0(r) : DMAREG1(r))
+
+
+
+/* ================================================================
+ * Helper macros...
+ */
+
+#define MGA_EMIT_STATE( dev_priv, dirty ) \
+do { \
+ if ( (dirty) & ~MGA_UPLOAD_CLIPRECTS ) { \
+ if ( dev_priv->chipset == MGA_CARD_TYPE_G400 ) { \
+ mga_g400_emit_state( dev_priv ); \
+ } else { \
+ mga_g200_emit_state( dev_priv ); \
+ } \
} \
- if(MGA_VERBOSE) \
- DRM_DEBUG("PRIMGETPTR in %s\n", __FUNCTION__); \
- dma_ptr = tmp_buf->current_dma_ptr; \
- num_dwords = tmp_buf->num_dwords; \
- phys_head = tmp_buf->phys_head; \
- outcount = 0; \
-} while(0)
-
-#define PRIMGETPTR(dev_priv) do { \
- drm_mga_prim_buf_t *tmp_buf = \
- dev_priv->prim_bufs[dev_priv->current_prim_idx]; \
- if(MGA_VERBOSE) \
- DRM_DEBUG("PRIMGETPTR in %s\n", __FUNCTION__); \
- dma_ptr = tmp_buf->current_dma_ptr; \
- num_dwords = tmp_buf->num_dwords; \
- phys_head = tmp_buf->phys_head; \
- outcount = 0; \
-} while(0)
-
-#define PRIMPTR(prim_buf) do { \
- if(MGA_VERBOSE) \
- DRM_DEBUG("PRIMPTR in %s\n", __FUNCTION__); \
- dma_ptr = prim_buf->current_dma_ptr; \
- num_dwords = prim_buf->num_dwords; \
- phys_head = prim_buf->phys_head; \
- outcount = 0; \
-} while(0)
-
-#define PRIMFINISH(prim_buf) do { \
- if (MGA_VERBOSE) { \
- DRM_DEBUG( "PRIMFINISH in %s\n", __FUNCTION__); \
- if (outcount & 3) \
- DRM_DEBUG(" --- truncation\n"); \
- } \
- prim_buf->num_dwords = num_dwords; \
- prim_buf->current_dma_ptr = dma_ptr; \
-} while(0)
-
-#define PRIMADVANCE(dev_priv) do { \
-drm_mga_prim_buf_t *tmp_buf = \
- dev_priv->prim_bufs[dev_priv->current_prim_idx]; \
- if (MGA_VERBOSE) { \
- DRM_DEBUG("PRIMADVANCE in %s\n", __FUNCTION__); \
- if (outcount & 3) \
- DRM_DEBUG(" --- truncation\n"); \
- } \
- tmp_buf->num_dwords = num_dwords; \
- tmp_buf->current_dma_ptr = dma_ptr; \
} while (0)
-#define PRIMUPDATE(dev_priv) do { \
- drm_mga_prim_buf_t *tmp_buf = \
- dev_priv->prim_bufs[dev_priv->current_prim_idx]; \
- tmp_buf->sec_used++; \
+#define LOCK_TEST_WITH_RETURN( dev ) \
+do { \
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \
+ dev->lock.pid != DRM_OS_CURRENTPID ) { \
+ DRM_ERROR( "%s called without lock held\n", \
+ __FUNCTION__ ); \
+ DRM_OS_RETURN( EINVAL ); \
+ } \
} while (0)
-#define AGEBUF(dev_priv, buf_priv) do { \
- drm_mga_prim_buf_t *tmp_buf = \
- dev_priv->prim_bufs[dev_priv->current_prim_idx]; \
- buf_priv->my_freelist->age = tmp_buf->prim_age; \
+#define WRAP_TEST_WITH_RETURN( dev_priv ) \
+do { \
+ if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \
+ if ( mga_is_idle( dev_priv ) ) { \
+ mga_do_dma_wrap_end( dev_priv ); \
+ } else if ( dev_priv->prim.space < \
+ dev_priv->prim.high_mark ) { \
+ if ( MGA_DMA_DEBUG ) \
+ DRM_INFO( __FUNCTION__": wrap...\n" ); \
+ DRM_OS_RETURN( EBUSY); \
+ } \
+ } \
} while (0)
+#define WRAP_WAIT_WITH_RETURN( dev_priv ) \
+do { \
+ if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \
+ if ( mga_do_wait_for_idle( dev_priv ) ) { \
+ if ( MGA_DMA_DEBUG ) \
+ DRM_INFO( __FUNCTION__": wrap...\n" ); \
+ DRM_OS_RETURN( EBUSY); \
+ } \
+ mga_do_dma_wrap_end( dev_priv ); \
+ } \
+} while (0)
+
+
+/* ================================================================
+ * Primary DMA command stream
+ */
+
+#define MGA_VERBOSE 0
-#define PRIMOUTREG(reg, val) do { \
- tempIndex[outcount]=ADRINDEX(reg); \
- dma_ptr[1+outcount] = val; \
- if (MGA_VERBOSE) \
- DRM_DEBUG(" PRIMOUT %d: 0x%x -- 0x%x\n", \
- num_dwords + 1 + outcount, ADRINDEX(reg), val); \
- if( ++outcount == 4) { \
- outcount = 0; \
- dma_ptr[0] = *(u_int32_t *)tempIndex; \
- dma_ptr+=5; \
- num_dwords += 5; \
+#define DMA_LOCALS unsigned int write; volatile u8 *prim;
+
+#define DMA_BLOCK_SIZE (5 * sizeof(u32))
+
+#define BEGIN_DMA( n ) \
+do { \
+ if ( MGA_VERBOSE ) { \
+ DRM_INFO( "BEGIN_DMA( %d ) in %s\n", \
+ (n), __FUNCTION__ ); \
+ DRM_INFO( " space=0x%x req=0x%x\n", \
+ dev_priv->prim.space, (n) * DMA_BLOCK_SIZE ); \
} \
-}while (0)
+ prim = dev_priv->prim.start; \
+ write = dev_priv->prim.tail; \
+} while (0)
+
+#define BEGIN_DMA_WRAP() \
+do { \
+ if ( MGA_VERBOSE ) { \
+ DRM_INFO( "BEGIN_DMA() in %s\n", __FUNCTION__ ); \
+ DRM_INFO( " space=0x%x\n", dev_priv->prim.space ); \
+ } \
+ prim = dev_priv->prim.start; \
+ write = dev_priv->prim.tail; \
+} while (0)
+
+#define ADVANCE_DMA() \
+do { \
+ dev_priv->prim.tail = write; \
+ if ( MGA_VERBOSE ) { \
+ DRM_INFO( "ADVANCE_DMA() tail=0x%05x sp=0x%x\n", \
+ write, dev_priv->prim.space ); \
+ } \
+} while (0)
+
+#define FLUSH_DMA() \
+do { \
+ if ( 0 ) { \
+ DRM_INFO( __FUNCTION__ ":\n" ); \
+ DRM_INFO( " tail=0x%06x head=0x%06lx\n", \
+ dev_priv->prim.tail, \
+ MGA_READ( MGA_PRIMADDRESS ) - \
+ dev_priv->primary->offset ); \
+ } \
+ if ( !test_bit( 0, &dev_priv->prim.wrapped ) ) { \
+ if ( dev_priv->prim.space < \
+ dev_priv->prim.high_mark ) { \
+ mga_do_dma_wrap_start( dev_priv ); \
+ } else { \
+ mga_do_dma_flush( dev_priv ); \
+ } \
+ } \
+} while (0)
+
+/* Never use this directly; always use DMA_BLOCK(...) for primary DMA output.
+ */
+#define DMA_WRITE( offset, val ) \
+do { \
+ if ( MGA_VERBOSE ) { \
+ DRM_INFO( " DMA_WRITE( 0x%08x ) at 0x%04x\n", \
+ (u32)(val), write + (offset) * sizeof(u32) ); \
+ } \
+ *(volatile u32 *)(prim + write + (offset) * sizeof(u32)) = val; \
+} while (0)
+
+#define DMA_BLOCK( reg0, val0, reg1, val1, reg2, val2, reg3, val3 ) \
+do { \
+ DMA_WRITE( 0, ((DMAREG( reg0 ) << 0) | \
+ (DMAREG( reg1 ) << 8) | \
+ (DMAREG( reg2 ) << 16) | \
+ (DMAREG( reg3 ) << 24)) ); \
+ DMA_WRITE( 1, val0 ); \
+ DMA_WRITE( 2, val1 ); \
+ DMA_WRITE( 3, val2 ); \
+ DMA_WRITE( 4, val3 ); \
+ write += DMA_BLOCK_SIZE; \
+} while (0)
+
+
+/* Buffer aging via primary DMA stream head pointer.
+ */
+
+#define SET_AGE( age, h, w ) \
+do { \
+ (age)->head = h; \
+ (age)->wrap = w; \
+} while (0)
+
+#define TEST_AGE( age, h, w ) ( (age)->wrap < w || \
+ ( (age)->wrap == w && \
+ (age)->head < h ) )
+
+#define AGE_BUFFER( buf_priv ) \
+do { \
+ drm_mga_freelist_t *entry = (buf_priv)->list_entry; \
+ if ( (buf_priv)->dispatched ) { \
+ entry->age.head = (dev_priv->prim.tail + \
+ dev_priv->primary->offset); \
+ entry->age.wrap = dev_priv->sarea_priv->last_wrap; \
+ } else { \
+ entry->age.head = 0; \
+ entry->age.wrap = 0; \
+ } \
+} while (0)
+
+
+#define MGA_ENGINE_IDLE_MASK (MGA_SOFTRAPEN | \
+ MGA_DWGENGSTS | \
+ MGA_ENDPRDMASTS)
+#define MGA_DMA_IDLE_MASK (MGA_SOFTRAPEN | \
+ MGA_ENDPRDMASTS)
+
+#define MGA_DMA_DEBUG 0
+
+
/* A reduced set of the mga registers.
*/
+#define MGA_CRTC_INDEX 0x1fd4
+
+#define MGA_ALPHACTRL 0x2c7c
+#define MGA_AR0 0x1c60
+#define MGA_AR1 0x1c64
+#define MGA_AR2 0x1c68
+#define MGA_AR3 0x1c6c
+#define MGA_AR4 0x1c70
+#define MGA_AR5 0x1c74
+#define MGA_AR6 0x1c78
+
+#define MGA_CXBNDRY 0x1c80
+#define MGA_CXLEFT 0x1ca0
+#define MGA_CXRIGHT 0x1ca4
+
+#define MGA_DMAPAD 0x1c54
+#define MGA_DSTORG 0x2cb8
+#define MGA_DWGCTL 0x1c00
+# define MGA_OPCOD_MASK (15 << 0)
+# define MGA_OPCOD_TRAP (4 << 0)
+# define MGA_OPCOD_TEXTURE_TRAP (6 << 0)
+# define MGA_OPCOD_BITBLT (8 << 0)
+# define MGA_OPCOD_ILOAD (9 << 0)
+# define MGA_ATYPE_MASK (7 << 4)
+# define MGA_ATYPE_RPL (0 << 4)
+# define MGA_ATYPE_RSTR (1 << 4)
+# define MGA_ATYPE_ZI (3 << 4)
+# define MGA_ATYPE_BLK (4 << 4)
+# define MGA_ATYPE_I (7 << 4)
+# define MGA_LINEAR (1 << 7)
+# define MGA_ZMODE_MASK (7 << 8)
+# define MGA_ZMODE_NOZCMP (0 << 8)
+# define MGA_ZMODE_ZE (2 << 8)
+# define MGA_ZMODE_ZNE (3 << 8)
+# define MGA_ZMODE_ZLT (4 << 8)
+# define MGA_ZMODE_ZLTE (5 << 8)
+# define MGA_ZMODE_ZGT (6 << 8)
+# define MGA_ZMODE_ZGTE (7 << 8)
+# define MGA_SOLID (1 << 11)
+# define MGA_ARZERO (1 << 12)
+# define MGA_SGNZERO (1 << 13)
+# define MGA_SHIFTZERO (1 << 14)
+# define MGA_BOP_MASK (15 << 16)
+# define MGA_BOP_ZERO (0 << 16)
+# define MGA_BOP_DST (10 << 16)
+# define MGA_BOP_SRC (12 << 16)
+# define MGA_BOP_ONE (15 << 16)
+# define MGA_TRANS_SHIFT 20
+# define MGA_TRANS_MASK (15 << 20)
+# define MGA_BLTMOD_MASK (15 << 25)
+# define MGA_BLTMOD_BMONOLEF (0 << 25)
+# define MGA_BLTMOD_BMONOWF (4 << 25)
+# define MGA_BLTMOD_PLAN (1 << 25)
+# define MGA_BLTMOD_BFCOL (2 << 25)
+# define MGA_BLTMOD_BU32BGR (3 << 25)
+# define MGA_BLTMOD_BU32RGB (7 << 25)
+# define MGA_BLTMOD_BU24BGR (11 << 25)
+# define MGA_BLTMOD_BU24RGB (15 << 25)
+# define MGA_PATTERN (1 << 29)
+# define MGA_TRANSC (1 << 30)
+# define MGA_CLIPDIS (1 << 31)
+#define MGA_DWGSYNC 0x2c4c
+
+#define MGA_FCOL 0x1c24
+#define MGA_FIFOSTATUS 0x1e10
+#define MGA_FOGCOL 0x1cf4
+#define MGA_FXBNDRY 0x1c84
+#define MGA_FXLEFT 0x1ca8
+#define MGA_FXRIGHT 0x1cac
+
+#define MGA_ICLEAR 0x1e18
+# define MGA_SOFTRAPICLR (1 << 0)
+#define MGA_IEN 0x1e1c
+# define MGA_SOFTRAPIEN (1 << 0)
+
+#define MGA_LEN 0x1c5c
+
+#define MGA_MACCESS 0x1c04
+
+#define MGA_PITCH 0x1c8c
+#define MGA_PLNWT 0x1c1c
+#define MGA_PRIMADDRESS 0x1e58
+# define MGA_DMA_GENERAL (0 << 0)
+# define MGA_DMA_BLIT (1 << 0)
+# define MGA_DMA_VECTOR (2 << 0)
+# define MGA_DMA_VERTEX (3 << 0)
+#define MGA_PRIMEND 0x1e5c
+# define MGA_PRIMNOSTART (1 << 0)
+# define MGA_PAGPXFER (1 << 1)
+#define MGA_PRIMPTR 0x1e50
+# define MGA_PRIMPTREN0 (1 << 0)
+# define MGA_PRIMPTREN1 (1 << 1)
+
+#define MGA_RST 0x1e40
+# define MGA_SOFTRESET (1 << 0)
+# define MGA_SOFTEXTRST (1 << 1)
+
+#define MGA_SECADDRESS 0x2c40
+#define MGA_SECEND 0x2c44
+#define MGA_SETUPADDRESS 0x2cd0
+#define MGA_SETUPEND 0x2cd4
+#define MGA_SGN 0x1c58
+#define MGA_SOFTRAP 0x2c48
+#define MGA_SRCORG 0x2cb4
+# define MGA_SRMMAP_MASK (1 << 0)
+# define MGA_SRCMAP_FB (0 << 0)
+# define MGA_SRCMAP_SYSMEM (1 << 0)
+# define MGA_SRCACC_MASK (1 << 1)
+# define MGA_SRCACC_PCI (0 << 1)
+# define MGA_SRCACC_AGP (1 << 1)
+#define MGA_STATUS 0x1e14
+# define MGA_SOFTRAPEN (1 << 0)
+# define MGA_DWGENGSTS (1 << 16)
+# define MGA_ENDPRDMASTS (1 << 17)
+#define MGA_STENCIL 0x2cc8
+#define MGA_STENCILCTL 0x2ccc
+
+#define MGA_TDUALSTAGE0 0x2cf8
+#define MGA_TDUALSTAGE1 0x2cfc
+#define MGA_TEXBORDERCOL 0x2c5c
+#define MGA_TEXCTL 0x2c30
+#define MGA_TEXCTL2 0x2c3c
+# define MGA_DUALTEX (1 << 7)
+# define MGA_G400_TC2_MAGIC (1 << 15)
+# define MGA_MAP1_ENABLE (1 << 31)
+#define MGA_TEXFILTER 0x2c58
+#define MGA_TEXHEIGHT 0x2c2c
+#define MGA_TEXORG 0x2c24
+# define MGA_TEXORGMAP_MASK (1 << 0)
+# define MGA_TEXORGMAP_FB (0 << 0)
+# define MGA_TEXORGMAP_SYSMEM (1 << 0)
+# define MGA_TEXORGACC_MASK (1 << 1)
+# define MGA_TEXORGACC_PCI (0 << 1)
+# define MGA_TEXORGACC_AGP (1 << 1)
+#define MGA_TEXORG1 0x2ca4
+#define MGA_TEXORG2 0x2ca8
+#define MGA_TEXORG3 0x2cac
+#define MGA_TEXORG4 0x2cb0
+#define MGA_TEXTRANS 0x2c34
+#define MGA_TEXTRANSHIGH 0x2c38
+#define MGA_TEXWIDTH 0x2c28
+
+#define MGA_WACCEPTSEQ 0x1dd4
+#define MGA_WCODEADDR 0x1e6c
+#define MGA_WFLAG 0x1dc4
+#define MGA_WFLAG1 0x1de0
+#define MGA_WFLAGNB 0x1e64
+#define MGA_WFLAGNB1 0x1e08
+#define MGA_WGETMSB 0x1dc8
+#define MGA_WIADDR 0x1dc0
+#define MGA_WIADDR2 0x1dd8
+# define MGA_WMODE_SUSPEND (0 << 0)
+# define MGA_WMODE_RESUME (1 << 0)
+# define MGA_WMODE_JUMP (2 << 0)
+# define MGA_WMODE_START (3 << 0)
+# define MGA_WAGP_ENABLE (1 << 2)
+#define MGA_WMISC 0x1e70
+# define MGA_WUCODECACHE_ENABLE (1 << 0)
+# define MGA_WMASTER_ENABLE (1 << 1)
+# define MGA_WCACHEFLUSH_ENABLE (1 << 3)
+#define MGA_WVRTXSZ 0x1dcc
+
+#define MGA_YBOT 0x1c9c
+#define MGA_YDST 0x1c90
+#define MGA_YDSTLEN 0x1c88
+#define MGA_YDSTORG 0x1c94
+#define MGA_YTOP 0x1c98
+
+#define MGA_ZORG 0x1c0c
+
+/* This finishes the current batch of commands
+ */
+#define MGA_EXEC 0x0100
-#define MGAREG_MGA_EXEC 0x0100
-#define MGAREG_ALPHACTRL 0x2c7c
-#define MGAREG_AR0 0x1c60
-#define MGAREG_AR1 0x1c64
-#define MGAREG_AR2 0x1c68
-#define MGAREG_AR3 0x1c6c
-#define MGAREG_AR4 0x1c70
-#define MGAREG_AR5 0x1c74
-#define MGAREG_AR6 0x1c78
-#define MGAREG_CXBNDRY 0x1c80
-#define MGAREG_CXLEFT 0x1ca0
-#define MGAREG_CXRIGHT 0x1ca4
-#define MGAREG_DMAPAD 0x1c54
-#define MGAREG_DSTORG 0x2cb8
-#define MGAREG_DWGCTL 0x1c00
-#define MGAREG_DWGSYNC 0x2c4c
-#define MGAREG_FCOL 0x1c24
-#define MGAREG_FIFOSTATUS 0x1e10
-#define MGAREG_FOGCOL 0x1cf4
-#define MGAREG_FXBNDRY 0x1c84
-#define MGAREG_FXLEFT 0x1ca8
-#define MGAREG_FXRIGHT 0x1cac
-#define MGAREG_ICLEAR 0x1e18
-#define MGAREG_IEN 0x1e1c
-#define MGAREG_LEN 0x1c5c
-#define MGAREG_MACCESS 0x1c04
-#define MGAREG_PITCH 0x1c8c
-#define MGAREG_PLNWT 0x1c1c
-#define MGAREG_PRIMADDRESS 0x1e58
-#define MGAREG_PRIMEND 0x1e5c
-#define MGAREG_PRIMPTR 0x1e50
-#define MGAREG_SECADDRESS 0x2c40
-#define MGAREG_SECEND 0x2c44
-#define MGAREG_SETUPADDRESS 0x2cd0
-#define MGAREG_SETUPEND 0x2cd4
-#define MGAREG_SOFTRAP 0x2c48
-#define MGAREG_SRCORG 0x2cb4
-#define MGAREG_STATUS 0x1e14
-#define MGAREG_STENCIL 0x2cc8
-#define MGAREG_STENCILCTL 0x2ccc
-#define MGAREG_TDUALSTAGE0 0x2cf8
-#define MGAREG_TDUALSTAGE1 0x2cfc
-#define MGAREG_TEXBORDERCOL 0x2c5c
-#define MGAREG_TEXCTL 0x2c30
-#define MGAREG_TEXCTL2 0x2c3c
-#define MGAREG_TEXFILTER 0x2c58
-#define MGAREG_TEXHEIGHT 0x2c2c
-#define MGAREG_TEXORG 0x2c24
-#define MGAREG_TEXORG1 0x2ca4
-#define MGAREG_TEXORG2 0x2ca8
-#define MGAREG_TEXORG3 0x2cac
-#define MGAREG_TEXORG4 0x2cb0
-#define MGAREG_TEXTRANS 0x2c34
-#define MGAREG_TEXTRANSHIGH 0x2c38
-#define MGAREG_TEXWIDTH 0x2c28
-#define MGAREG_WACCEPTSEQ 0x1dd4
-#define MGAREG_WCODEADDR 0x1e6c
-#define MGAREG_WFLAG 0x1dc4
-#define MGAREG_WFLAG1 0x1de0
-#define MGAREG_WFLAGNB 0x1e64
-#define MGAREG_WFLAGNB1 0x1e08
-#define MGAREG_WGETMSB 0x1dc8
-#define MGAREG_WIADDR 0x1dc0
-#define MGAREG_WIADDR2 0x1dd8
-#define MGAREG_WMISC 0x1e70
-#define MGAREG_WVRTXSZ 0x1dcc
-#define MGAREG_YBOT 0x1c9c
-#define MGAREG_YDST 0x1c90
-#define MGAREG_YDSTLEN 0x1c88
-#define MGAREG_YDSTORG 0x1c94
-#define MGAREG_YTOP 0x1c98
-#define MGAREG_ZORG 0x1c0c
-
-/* Warp registers */
-#define MGAREG_WR0 0x2d00
-#define MGAREG_WR1 0x2d04
-#define MGAREG_WR2 0x2d08
-#define MGAREG_WR3 0x2d0c
-#define MGAREG_WR4 0x2d10
-#define MGAREG_WR5 0x2d14
-#define MGAREG_WR6 0x2d18
-#define MGAREG_WR7 0x2d1c
-#define MGAREG_WR8 0x2d20
-#define MGAREG_WR9 0x2d24
-#define MGAREG_WR10 0x2d28
-#define MGAREG_WR11 0x2d2c
-#define MGAREG_WR12 0x2d30
-#define MGAREG_WR13 0x2d34
-#define MGAREG_WR14 0x2d38
-#define MGAREG_WR15 0x2d3c
-#define MGAREG_WR16 0x2d40
-#define MGAREG_WR17 0x2d44
-#define MGAREG_WR18 0x2d48
-#define MGAREG_WR19 0x2d4c
-#define MGAREG_WR20 0x2d50
-#define MGAREG_WR21 0x2d54
-#define MGAREG_WR22 0x2d58
-#define MGAREG_WR23 0x2d5c
-#define MGAREG_WR24 0x2d60
-#define MGAREG_WR25 0x2d64
-#define MGAREG_WR26 0x2d68
-#define MGAREG_WR27 0x2d6c
-#define MGAREG_WR28 0x2d70
-#define MGAREG_WR29 0x2d74
-#define MGAREG_WR30 0x2d78
-#define MGAREG_WR31 0x2d7c
-#define MGAREG_WR32 0x2d80
-#define MGAREG_WR33 0x2d84
-#define MGAREG_WR34 0x2d88
-#define MGAREG_WR35 0x2d8c
-#define MGAREG_WR36 0x2d90
-#define MGAREG_WR37 0x2d94
-#define MGAREG_WR38 0x2d98
-#define MGAREG_WR39 0x2d9c
-#define MGAREG_WR40 0x2da0
-#define MGAREG_WR41 0x2da4
-#define MGAREG_WR42 0x2da8
-#define MGAREG_WR43 0x2dac
-#define MGAREG_WR44 0x2db0
-#define MGAREG_WR45 0x2db4
-#define MGAREG_WR46 0x2db8
-#define MGAREG_WR47 0x2dbc
-#define MGAREG_WR48 0x2dc0
-#define MGAREG_WR49 0x2dc4
-#define MGAREG_WR50 0x2dc8
-#define MGAREG_WR51 0x2dcc
-#define MGAREG_WR52 0x2dd0
-#define MGAREG_WR53 0x2dd4
-#define MGAREG_WR54 0x2dd8
-#define MGAREG_WR55 0x2ddc
-#define MGAREG_WR56 0x2de0
-#define MGAREG_WR57 0x2de4
-#define MGAREG_WR58 0x2de8
-#define MGAREG_WR59 0x2dec
-#define MGAREG_WR60 0x2df0
-#define MGAREG_WR61 0x2df4
-#define MGAREG_WR62 0x2df8
-#define MGAREG_WR63 0x2dfc
-
-
-#define PDEA_pagpxfer_enable 0x2
-
-#define WIA_wmode_suspend 0x0
-#define WIA_wmode_start 0x3
-#define WIA_wagp_agp 0x4
-
-#define DC_opcod_line_open 0x0
-#define DC_opcod_autoline_open 0x1
-#define DC_opcod_line_close 0x2
-#define DC_opcod_autoline_close 0x3
-#define DC_opcod_trap 0x4
-#define DC_opcod_texture_trap 0x6
-#define DC_opcod_bitblt 0x8
-#define DC_opcod_iload 0x9
-#define DC_atype_rpl 0x0
-#define DC_atype_rstr 0x10
-#define DC_atype_zi 0x30
-#define DC_atype_blk 0x40
-#define DC_atype_i 0x70
-#define DC_linear_xy 0x0
-#define DC_linear_linear 0x80
-#define DC_zmode_nozcmp 0x0
-#define DC_zmode_ze 0x200
-#define DC_zmode_zne 0x300
-#define DC_zmode_zlt 0x400
-#define DC_zmode_zlte 0x500
-#define DC_zmode_zgt 0x600
-#define DC_zmode_zgte 0x700
-#define DC_solid_disable 0x0
-#define DC_solid_enable 0x800
-#define DC_arzero_disable 0x0
-#define DC_arzero_enable 0x1000
-#define DC_sgnzero_disable 0x0
-#define DC_sgnzero_enable 0x2000
-#define DC_shftzero_disable 0x0
-#define DC_shftzero_enable 0x4000
-#define DC_bop_SHIFT 16
-#define DC_trans_SHIFT 20
-#define DC_bltmod_bmonolef 0x0
-#define DC_bltmod_bmonowf 0x8000000
-#define DC_bltmod_bplan 0x2000000
-#define DC_bltmod_bfcol 0x4000000
-#define DC_bltmod_bu32bgr 0x6000000
-#define DC_bltmod_bu32rgb 0xe000000
-#define DC_bltmod_bu24bgr 0x16000000
-#define DC_bltmod_bu24rgb 0x1e000000
-#define DC_pattern_disable 0x0
-#define DC_pattern_enable 0x20000000
-#define DC_transc_disable 0x0
-#define DC_transc_enable 0x40000000
-#define DC_clipdis_disable 0x0
-#define DC_clipdis_enable 0x80000000
-
-#define SETADD_mode_vertlist 0x0
-
-
-#define MGA_CLEAR_CMD (DC_opcod_trap | DC_arzero_enable | \
- DC_sgnzero_enable | DC_shftzero_enable | \
- (0xC << DC_bop_SHIFT) | DC_clipdis_enable | \
- DC_solid_enable | DC_transc_enable)
-
-
-#define MGA_COPY_CMD (DC_opcod_bitblt | DC_atype_rpl | DC_linear_xy | \
- DC_solid_disable | DC_arzero_disable | \
- DC_sgnzero_enable | DC_shftzero_enable | \
- (0xC << DC_bop_SHIFT) | DC_bltmod_bfcol | \
- DC_pattern_disable | DC_transc_disable | \
- DC_clipdis_enable) \
-
-#define MGA_FLUSH_CMD (DC_opcod_texture_trap | (0xF << DC_trans_SHIFT) |\
- DC_arzero_enable | DC_sgnzero_enable | \
- DC_atype_i)
+/* Warp registers
+ */
+#define MGA_WR0 0x2d00
+#define MGA_WR1 0x2d04
+#define MGA_WR2 0x2d08
+#define MGA_WR3 0x2d0c
+#define MGA_WR4 0x2d10
+#define MGA_WR5 0x2d14
+#define MGA_WR6 0x2d18
+#define MGA_WR7 0x2d1c
+#define MGA_WR8 0x2d20
+#define MGA_WR9 0x2d24
+#define MGA_WR10 0x2d28
+#define MGA_WR11 0x2d2c
+#define MGA_WR12 0x2d30
+#define MGA_WR13 0x2d34
+#define MGA_WR14 0x2d38
+#define MGA_WR15 0x2d3c
+#define MGA_WR16 0x2d40
+#define MGA_WR17 0x2d44
+#define MGA_WR18 0x2d48
+#define MGA_WR19 0x2d4c
+#define MGA_WR20 0x2d50
+#define MGA_WR21 0x2d54
+#define MGA_WR22 0x2d58
+#define MGA_WR23 0x2d5c
+#define MGA_WR24 0x2d60
+#define MGA_WR25 0x2d64
+#define MGA_WR26 0x2d68
+#define MGA_WR27 0x2d6c
+#define MGA_WR28 0x2d70
+#define MGA_WR29 0x2d74
+#define MGA_WR30 0x2d78
+#define MGA_WR31 0x2d7c
+#define MGA_WR32 0x2d80
+#define MGA_WR33 0x2d84
+#define MGA_WR34 0x2d88
+#define MGA_WR35 0x2d8c
+#define MGA_WR36 0x2d90
+#define MGA_WR37 0x2d94
+#define MGA_WR38 0x2d98
+#define MGA_WR39 0x2d9c
+#define MGA_WR40 0x2da0
+#define MGA_WR41 0x2da4
+#define MGA_WR42 0x2da8
+#define MGA_WR43 0x2dac
+#define MGA_WR44 0x2db0
+#define MGA_WR45 0x2db4
+#define MGA_WR46 0x2db8
+#define MGA_WR47 0x2dbc
+#define MGA_WR48 0x2dc0
+#define MGA_WR49 0x2dc4
+#define MGA_WR50 0x2dc8
+#define MGA_WR51 0x2dcc
+#define MGA_WR52 0x2dd0
+#define MGA_WR53 0x2dd4
+#define MGA_WR54 0x2dd8
+#define MGA_WR55 0x2ddc
+#define MGA_WR56 0x2de0
+#define MGA_WR57 0x2de4
+#define MGA_WR58 0x2de8
+#define MGA_WR59 0x2dec
+#define MGA_WR60 0x2df0
+#define MGA_WR61 0x2df4
+#define MGA_WR62 0x2df8
+#define MGA_WR63 0x2dfc
+# define MGA_G400_WR_MAGIC (1 << 6)
+# define MGA_G400_WR56_MAGIC 0x46480000 /* 12800.0f */
+
+
+#define MGA_ILOAD_ALIGN 64
+#define MGA_ILOAD_MASK (MGA_ILOAD_ALIGN - 1)
+
+#define MGA_DWGCTL_FLUSH (MGA_OPCOD_TEXTURE_TRAP | \
+ MGA_ATYPE_I | \
+ MGA_ZMODE_NOZCMP | \
+ MGA_ARZERO | \
+ MGA_SGNZERO | \
+ MGA_BOP_SRC | \
+ (15 << MGA_TRANS_SHIFT))
+
+#define MGA_DWGCTL_CLEAR (MGA_OPCOD_TRAP | \
+ MGA_ZMODE_NOZCMP | \
+ MGA_SOLID | \
+ MGA_ARZERO | \
+ MGA_SGNZERO | \
+ MGA_SHIFTZERO | \
+ MGA_BOP_SRC | \
+ (0 << MGA_TRANS_SHIFT) | \
+ MGA_BLTMOD_BMONOLEF | \
+ MGA_TRANSC | \
+ MGA_CLIPDIS)
+
+#define MGA_DWGCTL_COPY (MGA_OPCOD_BITBLT | \
+ MGA_ATYPE_RPL | \
+ MGA_SGNZERO | \
+ MGA_SHIFTZERO | \
+ MGA_BOP_SRC | \
+ (0 << MGA_TRANS_SHIFT) | \
+ MGA_BLTMOD_BFCOL | \
+ MGA_CLIPDIS)
+
+/* Simple idle test.
+ */
+static __inline__ int mga_is_idle( drm_mga_private_t *dev_priv )
+{
+ u32 status = MGA_READ( MGA_STATUS ) & MGA_ENGINE_IDLE_MASK;
+ return ( status == MGA_ENDPRDMASTS );
+}
#endif
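The BEGIN_DMA()/DMA_BLOCK()/ADVANCE_DMA() macros above emit fixed five-dword blocks into the primary DMA buffer: one header dword packing four 8-bit register indices (via DMAREG) followed by four data dwords, which is why DMA_BLOCK_SIZE is 5 * sizeof(u32). The standalone sketch below shows only that encoding against an ordinary heap buffer; it is not part of the patch, the emit_block() helper is invented, the values written are arbitrary, and only DWGREG0-bank registers are used so the second-bank DMAREG1 path is omitted.

/*
 * Standalone illustration -- not part of this patch -- of the five-dword
 * block format that DMA_BLOCK() writes into the primary DMA buffer: one
 * header dword of four packed register indices, then four data dwords.
 * A plain heap buffer stands in for dev_priv->prim here.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint8_t  u8;
typedef uint32_t u32;

/* Register-bank math as in mga_drv.h; only the DWGREG0 bank is used here. */
#define DWGREG0		0x1c00
#define DMAREG0(r)	((u8)(((r) - DWGREG0) >> 2))

#define DMA_BLOCK_SIZE	(5 * sizeof(u32))

/* A few DWGREG0-bank registers from the header above. */
#define MGA_DWGCTL	0x1c00
#define MGA_PLNWT	0x1c1c
#define MGA_FCOL	0x1c24
#define MGA_DMAPAD	0x1c54

static unsigned int emit_block(u8 *prim, unsigned int write,
			       u32 r0, u32 v0, u32 r1, u32 v1,
			       u32 r2, u32 v2, u32 r3, u32 v3)
{
	u32 *block = (u32 *)(prim + write);

	/* Header dword: four packed register indices, low byte first. */
	block[0] = ((u32)DMAREG0(r0) << 0)  | ((u32)DMAREG0(r1) << 8) |
		   ((u32)DMAREG0(r2) << 16) | ((u32)DMAREG0(r3) << 24);
	block[1] = v0;
	block[2] = v1;
	block[3] = v2;
	block[4] = v3;

	/* ADVANCE_DMA() would store this back into dev_priv->prim.tail. */
	return write + (unsigned int)DMA_BLOCK_SIZE;
}

int main(void)
{
	u8 *prim = calloc(1, 4096);
	unsigned int tail = 0;

	if (prim == NULL)
		return 1;

	tail = emit_block(prim, tail,
			  MGA_DWGCTL, 0x040c7814,	/* arbitrary values */
			  MGA_PLNWT,  0xffffffff,
			  MGA_FCOL,   0x00ff00ff,
			  MGA_DMAPAD, 0x00000000);

	printf("tail=%u header=0x%08x\n", tail, (unsigned)((u32 *)prim)[0]);
	free(prim);
	return 0;
}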
diff --git a/bsd/mga/mga_state.c b/bsd/mga/mga_state.c
index fb365631..320e2b3e 100644
--- a/bsd/mga/mga_state.c
+++ b/bsd/mga/mga_state.c
@@ -1,4 +1,4 @@
-/* mga_state.c -- State support for mga g200/g400
+/* mga_state.c -- State support for MGA G200/G400 -*- linux-c -*-
* Created: Thu Jan 27 02:53:43 2000 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
@@ -11,431 +11,408 @@
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: Jeff Hartmann <jhartmann@precisioninsight.com>
- * Keith Whitwell <keithw@precisioninsight.com>
+ * Authors:
+ * Jeff Hartmann <jhartmann@valinux.com>
+ * Keith Whitwell <keithw@valinux.com>
*
+ * Rewritten by:
+ * Gareth Hughes <gareth@valinux.com>
*/
#define __NO_VERSION__
+#include "mga.h"
#include "drmP.h"
#include "mga_drv.h"
#include "drm.h"
-typedef u_int16_t u16;
-typedef u_int32_t u32;
-
-#define MGAEMITCLIP_SIZE 10
-#define MGAEMITCTX_SIZE 20
-#define MGAG200EMITTEX_SIZE 20
-#define MGAG400EMITTEX0_SIZE 30
-#define MGAG400EMITTEX1_SIZE 25
-#define MGAG400EMITPIPE_SIZE 50
-#define MGAG200EMITPIPE_SIZE 15
-
-#define MAX_STATE_SIZE ((MGAEMITCLIP_SIZE * MGA_NR_SAREA_CLIPRECTS) + \
- MGAEMITCTX_SIZE + MGAG400EMITTEX0_SIZE + \
- MGAG400EMITTEX1_SIZE + MGAG400EMITPIPE_SIZE)
-
+/* ================================================================
+ * DMA hardware state programming functions
+ */
-static void mgaEmitClipRect(drm_mga_private_t * dev_priv,
- drm_clip_rect_t * box)
+static void mga_emit_clip_rect( drm_mga_private_t *dev_priv,
+ drm_clip_rect_t *box )
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned int *regs = sarea_priv->ContextState;
- PRIMLOCALS;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
+ unsigned int pitch = dev_priv->front_pitch;
+ DMA_LOCALS;
- /* This takes 10 dwords */
- PRIMGETPTR(dev_priv);
+ BEGIN_DMA( 2 );
- /* Force reset of dwgctl (eliminates clip disable) */
- if (dev_priv->chipset == MGA_CARD_TYPE_G400) {
-#if 0
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DWGSYNC, 0);
- PRIMOUTREG(MGAREG_DWGSYNC, 0);
- PRIMOUTREG(MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL]);
-#else
- PRIMOUTREG(MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL]);
- PRIMOUTREG(MGAREG_LEN + MGAREG_MGA_EXEC, 0x80000000);
- PRIMOUTREG(MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL]);
- PRIMOUTREG(MGAREG_LEN + MGAREG_MGA_EXEC, 0x80000000);
-#endif
+ /* Force reset of DWGCTL on G400 (eliminates clip disable bit).
+ */
+ if ( dev_priv->chipset == MGA_CARD_TYPE_G400 ) {
+ DMA_BLOCK( MGA_DWGCTL, ctx->dwgctl,
+ MGA_LEN + MGA_EXEC, 0x80000000,
+ MGA_DWGCTL, ctx->dwgctl,
+ MGA_LEN + MGA_EXEC, 0x80000000 );
}
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_CXBNDRY, ((box->x2) << 16) | (box->x1));
- PRIMOUTREG(MGAREG_YTOP, box->y1 * dev_priv->stride / dev_priv->cpp);
- PRIMOUTREG(MGAREG_YBOT, box->y2 * dev_priv->stride / dev_priv->cpp);
+ DMA_BLOCK( MGA_DMAPAD, 0x00000000,
+ MGA_CXBNDRY, (box->x2 << 16) | box->x1,
+ MGA_YTOP, box->y1 * pitch,
+ MGA_YBOT, box->y2 * pitch );
- PRIMADVANCE(dev_priv);
+ ADVANCE_DMA();
}
-static void mgaEmitContext(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g200_emit_context( drm_mga_private_t *dev_priv )
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned int *regs = sarea_priv->ContextState;
- PRIMLOCALS;
- DRM_DEBUG("%s\n", __FUNCTION__);
-
- /* This takes a max of 20 dwords */
- PRIMGETPTR(dev_priv);
-
- PRIMOUTREG(MGAREG_DSTORG, regs[MGA_CTXREG_DSTORG]);
- PRIMOUTREG(MGAREG_MACCESS, regs[MGA_CTXREG_MACCESS]);
- PRIMOUTREG(MGAREG_PLNWT, regs[MGA_CTXREG_PLNWT]);
- PRIMOUTREG(MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL]);
-
- PRIMOUTREG(MGAREG_ALPHACTRL, regs[MGA_CTXREG_ALPHACTRL]);
- PRIMOUTREG(MGAREG_FOGCOL, regs[MGA_CTXREG_FOGCOLOR]);
- PRIMOUTREG(MGAREG_WFLAG, regs[MGA_CTXREG_WFLAG]);
- PRIMOUTREG(MGAREG_ZORG, dev_priv->depthOffset); /* invarient */
-
- if (dev_priv->chipset == MGA_CARD_TYPE_G400) {
- PRIMOUTREG(MGAREG_WFLAG1, regs[MGA_CTXREG_WFLAG]);
- PRIMOUTREG(MGAREG_TDUALSTAGE0, regs[MGA_CTXREG_TDUAL0]);
- PRIMOUTREG(MGAREG_TDUALSTAGE1, regs[MGA_CTXREG_TDUAL1]);
- PRIMOUTREG(MGAREG_FCOL, regs[MGA_CTXREG_FCOL]);
-
- PRIMOUTREG(MGAREG_STENCIL, regs[MGA_CTXREG_STENCIL]);
- PRIMOUTREG(MGAREG_STENCILCTL, regs[MGA_CTXREG_STENCILCTL]);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
+ drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
+ DMA_LOCALS;
- } else {
- PRIMOUTREG(MGAREG_FCOL, regs[MGA_CTXREG_FCOL]);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- }
+ BEGIN_DMA( 3 );
+
+ DMA_BLOCK( MGA_DSTORG, ctx->dstorg,
+ MGA_MACCESS, ctx->maccess,
+ MGA_PLNWT, ctx->plnwt,
+ MGA_DWGCTL, ctx->dwgctl );
- PRIMADVANCE(dev_priv);
+ DMA_BLOCK( MGA_ALPHACTRL, ctx->alphactrl,
+ MGA_FOGCOL, ctx->fogcolor,
+ MGA_WFLAG, ctx->wflag,
+ MGA_ZORG, dev_priv->depth_offset );
+
+ DMA_BLOCK( MGA_FCOL, ctx->fcol,
+ MGA_DMAPAD, 0x00000000,
+ MGA_DMAPAD, 0x00000000,
+ MGA_DMAPAD, 0x00000000 );
+
+ ADVANCE_DMA();
}
-static void mgaG200EmitTex(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g400_emit_context( drm_mga_private_t *dev_priv )
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned int *regs = sarea_priv->TexState[0];
- PRIMLOCALS;
- DRM_DEBUG("%s\n", __FUNCTION__);
-
- PRIMGETPTR(dev_priv);
+ drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
+ DMA_LOCALS;
- /* This takes 20 dwords */
+ BEGIN_DMA( 4 );
- PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2]);
- PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL]);
- PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER]);
- PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL]);
+ DMA_BLOCK( MGA_DSTORG, ctx->dstorg,
+ MGA_MACCESS, ctx->maccess,
+ MGA_PLNWT, ctx->plnwt,
+ MGA_DWGCTL, ctx->dwgctl );
- PRIMOUTREG(MGAREG_TEXORG, regs[MGA_TEXREG_ORG]);
- PRIMOUTREG(MGAREG_TEXORG1, regs[MGA_TEXREG_ORG1]);
- PRIMOUTREG(MGAREG_TEXORG2, regs[MGA_TEXREG_ORG2]);
- PRIMOUTREG(MGAREG_TEXORG3, regs[MGA_TEXREG_ORG3]);
+ DMA_BLOCK( MGA_ALPHACTRL, ctx->alphactrl,
+ MGA_FOGCOL, ctx->fogcolor,
+ MGA_WFLAG, ctx->wflag,
+ MGA_ZORG, dev_priv->depth_offset );
- PRIMOUTREG(MGAREG_TEXORG4, regs[MGA_TEXREG_ORG4]);
- PRIMOUTREG(MGAREG_TEXWIDTH, regs[MGA_TEXREG_WIDTH]);
- PRIMOUTREG(MGAREG_TEXHEIGHT, regs[MGA_TEXREG_HEIGHT]);
- PRIMOUTREG(MGAREG_WR24, regs[MGA_TEXREG_WIDTH]);
+ DMA_BLOCK( MGA_WFLAG1, ctx->wflag,
+ MGA_TDUALSTAGE0, ctx->tdualstage0,
+ MGA_TDUALSTAGE1, ctx->tdualstage1,
+ MGA_FCOL, ctx->fcol );
- PRIMOUTREG(MGAREG_WR34, regs[MGA_TEXREG_HEIGHT]);
- PRIMOUTREG(MGAREG_TEXTRANS, 0xffff);
- PRIMOUTREG(MGAREG_TEXTRANSHIGH, 0xffff);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
+ DMA_BLOCK( MGA_STENCIL, ctx->stencil,
+ MGA_STENCILCTL, ctx->stencilctl,
+ MGA_DMAPAD, 0x00000000,
+ MGA_DMAPAD, 0x00000000 );
- PRIMADVANCE(dev_priv);
+ ADVANCE_DMA();
}
-#define TMC_dualtex_enable 0x80
-
-static void mgaG400EmitTex0(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g200_emit_tex0( drm_mga_private_t *dev_priv )
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned int *regs = sarea_priv->TexState[0];
- PRIMLOCALS;
- DRM_DEBUG("%s\n", __FUNCTION__);
-
- PRIMGETPTR(dev_priv);
-
- /* This takes a max of 30 dwords */
-
- PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] | 0x00008000);
- PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL]);
- PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER]);
- PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL]);
+ drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
+ DMA_LOCALS;
- PRIMOUTREG(MGAREG_TEXORG, regs[MGA_TEXREG_ORG]);
- PRIMOUTREG(MGAREG_TEXORG1, regs[MGA_TEXREG_ORG1]);
- PRIMOUTREG(MGAREG_TEXORG2, regs[MGA_TEXREG_ORG2]);
- PRIMOUTREG(MGAREG_TEXORG3, regs[MGA_TEXREG_ORG3]);
+ BEGIN_DMA( 4 );
- PRIMOUTREG(MGAREG_TEXORG4, regs[MGA_TEXREG_ORG4]);
- PRIMOUTREG(MGAREG_TEXWIDTH, regs[MGA_TEXREG_WIDTH]);
- PRIMOUTREG(MGAREG_TEXHEIGHT, regs[MGA_TEXREG_HEIGHT]);
- PRIMOUTREG(MGAREG_WR49, 0);
+ DMA_BLOCK( MGA_TEXCTL2, tex->texctl2,
+ MGA_TEXCTL, tex->texctl,
+ MGA_TEXFILTER, tex->texfilter,
+ MGA_TEXBORDERCOL, tex->texbordercol );
- PRIMOUTREG(MGAREG_WR57, 0);
- PRIMOUTREG(MGAREG_WR53, 0);
- PRIMOUTREG(MGAREG_WR61, 0);
- PRIMOUTREG(MGAREG_WR52, 0x40);
+ DMA_BLOCK( MGA_TEXORG, tex->texorg,
+ MGA_TEXORG1, tex->texorg1,
+ MGA_TEXORG2, tex->texorg2,
+ MGA_TEXORG3, tex->texorg3 );
- PRIMOUTREG(MGAREG_WR60, 0x40);
- PRIMOUTREG(MGAREG_WR54, regs[MGA_TEXREG_WIDTH] | 0x40);
- PRIMOUTREG(MGAREG_WR62, regs[MGA_TEXREG_HEIGHT] | 0x40);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
+ DMA_BLOCK( MGA_TEXORG4, tex->texorg4,
+ MGA_TEXWIDTH, tex->texwidth,
+ MGA_TEXHEIGHT, tex->texheight,
+ MGA_WR24, tex->texwidth );
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
+ DMA_BLOCK( MGA_WR34, tex->texheight,
+ MGA_TEXTRANS, 0x0000ffff,
+ MGA_TEXTRANSHIGH, 0x0000ffff,
+ MGA_DMAPAD, 0x00000000 );
- PRIMOUTREG(MGAREG_TEXTRANS, 0xffff);
- PRIMOUTREG(MGAREG_TEXTRANSHIGH, 0xffff);
-
- PRIMADVANCE(dev_priv);
+ ADVANCE_DMA();
}
-#define TMC_map1_enable 0x80000000
-
-static void mgaG400EmitTex1(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g400_emit_tex0( drm_mga_private_t *dev_priv )
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned int *regs = sarea_priv->TexState[1];
- PRIMLOCALS;
- DRM_DEBUG("%s\n", __FUNCTION__);
-
- PRIMGETPTR(dev_priv);
-
- /* This takes 25 dwords */
-
- PRIMOUTREG(MGAREG_TEXCTL2,
- regs[MGA_TEXREG_CTL2] | TMC_map1_enable | 0x00008000);
- PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL]);
- PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER]);
- PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL]);
+ drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
+ DMA_LOCALS;
+
+ BEGIN_DMA( 6 );
+
+ DMA_BLOCK( MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC,
+ MGA_TEXCTL, tex->texctl,
+ MGA_TEXFILTER, tex->texfilter,
+ MGA_TEXBORDERCOL, tex->texbordercol );
+
+ DMA_BLOCK( MGA_TEXORG, tex->texorg,
+ MGA_TEXORG1, tex->texorg1,
+ MGA_TEXORG2, tex->texorg2,
+ MGA_TEXORG3, tex->texorg3 );
+
+ DMA_BLOCK( MGA_TEXORG4, tex->texorg4,
+ MGA_TEXWIDTH, tex->texwidth,
+ MGA_TEXHEIGHT, tex->texheight,
+ MGA_WR49, 0x00000000 );
+
+ DMA_BLOCK( MGA_WR57, 0x00000000,
+ MGA_WR53, 0x00000000,
+ MGA_WR61, 0x00000000,
+ MGA_WR52, MGA_G400_WR_MAGIC );
+
+ DMA_BLOCK( MGA_WR60, MGA_G400_WR_MAGIC,
+ MGA_WR54, tex->texwidth | MGA_G400_WR_MAGIC,
+ MGA_WR62, tex->texheight | MGA_G400_WR_MAGIC,
+ MGA_DMAPAD, 0x00000000 );
+
+ DMA_BLOCK( MGA_DMAPAD, 0x00000000,
+ MGA_DMAPAD, 0x00000000,
+ MGA_TEXTRANS, 0x0000ffff,
+ MGA_TEXTRANSHIGH, 0x0000ffff );
+
+ ADVANCE_DMA();
+}
- PRIMOUTREG(MGAREG_TEXORG, regs[MGA_TEXREG_ORG]);
- PRIMOUTREG(MGAREG_TEXORG1, regs[MGA_TEXREG_ORG1]);
- PRIMOUTREG(MGAREG_TEXORG2, regs[MGA_TEXREG_ORG2]);
- PRIMOUTREG(MGAREG_TEXORG3, regs[MGA_TEXREG_ORG3]);
+static __inline__ void mga_g400_emit_tex1( drm_mga_private_t *dev_priv )
+{
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1];
+ DMA_LOCALS;
+
+ BEGIN_DMA( 5 );
+
+ DMA_BLOCK( MGA_TEXCTL2, (tex->texctl2 |
+ MGA_MAP1_ENABLE |
+ MGA_G400_TC2_MAGIC),
+ MGA_TEXCTL, tex->texctl,
+ MGA_TEXFILTER, tex->texfilter,
+ MGA_TEXBORDERCOL, tex->texbordercol );
+
+ DMA_BLOCK( MGA_TEXORG, tex->texorg,
+ MGA_TEXORG1, tex->texorg1,
+ MGA_TEXORG2, tex->texorg2,
+ MGA_TEXORG3, tex->texorg3 );
+
+ DMA_BLOCK( MGA_TEXORG4, tex->texorg4,
+ MGA_TEXWIDTH, tex->texwidth,
+ MGA_TEXHEIGHT, tex->texheight,
+ MGA_WR49, 0x00000000 );
+
+ DMA_BLOCK( MGA_WR57, 0x00000000,
+ MGA_WR53, 0x00000000,
+ MGA_WR61, 0x00000000,
+ MGA_WR52, tex->texwidth | MGA_G400_WR_MAGIC );
+
+ DMA_BLOCK( MGA_WR60, tex->texheight | MGA_G400_WR_MAGIC,
+ MGA_TEXTRANS, 0x0000ffff,
+ MGA_TEXTRANSHIGH, 0x0000ffff,
+ MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC );
+
+ ADVANCE_DMA();
+}
- PRIMOUTREG(MGAREG_TEXORG4, regs[MGA_TEXREG_ORG4]);
- PRIMOUTREG(MGAREG_TEXWIDTH, regs[MGA_TEXREG_WIDTH]);
- PRIMOUTREG(MGAREG_TEXHEIGHT, regs[MGA_TEXREG_HEIGHT]);
- PRIMOUTREG(MGAREG_WR49, 0);
+static __inline__ void mga_g200_emit_pipe( drm_mga_private_t *dev_priv )
+{
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned int pipe = sarea_priv->warp_pipe;
+ DMA_LOCALS;
- PRIMOUTREG(MGAREG_WR57, 0);
- PRIMOUTREG(MGAREG_WR53, 0);
- PRIMOUTREG(MGAREG_WR61, 0);
- PRIMOUTREG(MGAREG_WR52, regs[MGA_TEXREG_WIDTH] | 0x40);
+ BEGIN_DMA( 3 );
- PRIMOUTREG(MGAREG_WR60, regs[MGA_TEXREG_HEIGHT] | 0x40);
+ DMA_BLOCK( MGA_WIADDR, MGA_WMODE_SUSPEND,
+ MGA_WVRTXSZ, 0x00000007,
+ MGA_WFLAG, 0x00000000,
+ MGA_WR24, 0x00000000 );
- PRIMOUTREG(MGAREG_TEXTRANS, 0xffff);
- PRIMOUTREG(MGAREG_TEXTRANSHIGH, 0xffff);
- PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] | 0x00008000);
+ DMA_BLOCK( MGA_WR25, 0x00000100,
+ MGA_WR34, 0x00000000,
+ MGA_WR42, 0x0000ffff,
+ MGA_WR60, 0x0000ffff );
- PRIMADVANCE(dev_priv);
+	/* Padding required due to hardware bug.
+ */
+ DMA_BLOCK( MGA_DMAPAD, 0xffffffff,
+ MGA_DMAPAD, 0xffffffff,
+ MGA_DMAPAD, 0xffffffff,
+ MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] |
+ MGA_WMODE_START |
+ MGA_WAGP_ENABLE) );
+
+ ADVANCE_DMA();
}
-#define MAGIC_FPARAM_HEX_VALUE 0x46480000
-/* This is the hex value of 12800.0f which is a magic value we must
- * set in wr56.
- */
-
-
-#define EMIT_PIPE 50
-static void mgaG400EmitPipe(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g400_emit_pipe( drm_mga_private_t *dev_priv )
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned int pipe = sarea_priv->WarpPipe;
- PRIMLOCALS;
- DRM_DEBUG("%s\n", __FUNCTION__);
-
- PRIMGETPTR(dev_priv);
-
- /* This takes 50 dwords */
-
- /* Establish vertex size.
- */
- PRIMOUTREG(MGAREG_WIADDR2, WIA_wmode_suspend);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
-
- if (pipe & MGA_T2) {
- PRIMOUTREG(MGAREG_WVRTXSZ, 0x00001e09);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
-
- PRIMOUTREG(MGAREG_WACCEPTSEQ, 0);
- PRIMOUTREG(MGAREG_WACCEPTSEQ, 0);
- PRIMOUTREG(MGAREG_WACCEPTSEQ, 0);
- PRIMOUTREG(MGAREG_WACCEPTSEQ, 0x1e000000);
+ unsigned int pipe = sarea_priv->warp_pipe;
+ DMA_LOCALS;
+
+ BEGIN_DMA( 10 );
+
+ DMA_BLOCK( MGA_WIADDR2, MGA_WMODE_SUSPEND,
+ MGA_DMAPAD, 0x00000000,
+ MGA_DMAPAD, 0x00000000,
+ MGA_DMAPAD, 0x00000000 );
+
+ if ( pipe & MGA_T2 ) {
+ DMA_BLOCK( MGA_WVRTXSZ, 0x00001e09,
+ MGA_DMAPAD, 0x00000000,
+ MGA_DMAPAD, 0x00000000,
+ MGA_DMAPAD, 0x00000000 );
+
+ DMA_BLOCK( MGA_WACCEPTSEQ, 0x00000000,
+ MGA_WACCEPTSEQ, 0x00000000,
+ MGA_WACCEPTSEQ, 0x00000000,
+ MGA_WACCEPTSEQ, 0x1e000000 );
} else {
- if (dev_priv->WarpPipe & MGA_T2) {
+ if ( dev_priv->warp_pipe & MGA_T2 ) {
/* Flush the WARP pipe */
- PRIMOUTREG(MGAREG_YDST, 0);
- PRIMOUTREG(MGAREG_FXLEFT, 0);
- PRIMOUTREG(MGAREG_FXRIGHT, 1);
- PRIMOUTREG(MGAREG_DWGCTL, MGA_FLUSH_CMD);
-
- PRIMOUTREG(MGAREG_LEN + MGAREG_MGA_EXEC, 1);
- PRIMOUTREG(MGAREG_DWGSYNC, 0x7000);
- PRIMOUTREG(MGAREG_TEXCTL2, 0x00008000);
- PRIMOUTREG(MGAREG_LEN + MGAREG_MGA_EXEC, 0);
-
- PRIMOUTREG(MGAREG_TEXCTL2, 0x80 | 0x00008000);
- PRIMOUTREG(MGAREG_LEN + MGAREG_MGA_EXEC, 0);
- PRIMOUTREG(MGAREG_TEXCTL2, 0x00008000);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
+ DMA_BLOCK( MGA_YDST, 0x00000000,
+ MGA_FXLEFT, 0x00000000,
+ MGA_FXRIGHT, 0x00000001,
+ MGA_DWGCTL, MGA_DWGCTL_FLUSH );
+
+ DMA_BLOCK( MGA_LEN + MGA_EXEC, 0x00000001,
+ MGA_DWGSYNC, 0x00007000,
+ MGA_TEXCTL2, MGA_G400_TC2_MAGIC,
+ MGA_LEN + MGA_EXEC, 0x00000000 );
+
+ DMA_BLOCK( MGA_TEXCTL2, (MGA_DUALTEX |
+ MGA_G400_TC2_MAGIC),
+ MGA_LEN + MGA_EXEC, 0x00000000,
+ MGA_TEXCTL2, MGA_G400_TC2_MAGIC,
+ MGA_DMAPAD, 0x00000000 );
}
- PRIMOUTREG(MGAREG_WVRTXSZ, 0x00001807);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
+ DMA_BLOCK( MGA_WVRTXSZ, 0x00001807,
+ MGA_DMAPAD, 0x00000000,
+ MGA_DMAPAD, 0x00000000,
+ MGA_DMAPAD, 0x00000000 );
- PRIMOUTREG(MGAREG_WACCEPTSEQ, 0);
- PRIMOUTREG(MGAREG_WACCEPTSEQ, 0);
- PRIMOUTREG(MGAREG_WACCEPTSEQ, 0);
- PRIMOUTREG(MGAREG_WACCEPTSEQ, 0x18000000);
+ DMA_BLOCK( MGA_WACCEPTSEQ, 0x00000000,
+ MGA_WACCEPTSEQ, 0x00000000,
+ MGA_WACCEPTSEQ, 0x00000000,
+ MGA_WACCEPTSEQ, 0x18000000 );
}
- PRIMOUTREG(MGAREG_WFLAG, 0);
- PRIMOUTREG(MGAREG_WFLAG1, 0);
- PRIMOUTREG(MGAREG_WR56, MAGIC_FPARAM_HEX_VALUE);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
-
- PRIMOUTREG(MGAREG_WR49, 0); /* Tex stage 0 */
- PRIMOUTREG(MGAREG_WR57, 0); /* Tex stage 0 */
- PRIMOUTREG(MGAREG_WR53, 0); /* Tex stage 1 */
- PRIMOUTREG(MGAREG_WR61, 0); /* Tex stage 1 */
-
-
- PRIMOUTREG(MGAREG_WR54, 0x40); /* Tex stage 0 : w */
- PRIMOUTREG(MGAREG_WR62, 0x40); /* Tex stage 0 : h */
- PRIMOUTREG(MGAREG_WR52, 0x40); /* Tex stage 1 : w */
- PRIMOUTREG(MGAREG_WR60, 0x40); /* Tex stage 1 : h */
-
-
- /* Dma pading required due to hw bug */
- PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
- PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
- PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
- PRIMOUTREG(MGAREG_WIADDR2,
- (u32) (dev_priv->WarpIndex[pipe].
- phys_addr | WIA_wmode_start | WIA_wagp_agp));
- PRIMADVANCE(dev_priv);
+ DMA_BLOCK( MGA_WFLAG, 0x00000000,
+ MGA_WFLAG1, 0x00000000,
+ MGA_WR56, MGA_G400_WR56_MAGIC,
+ MGA_DMAPAD, 0x00000000 );
+
+ DMA_BLOCK( MGA_WR49, 0x00000000, /* tex0 */
+ MGA_WR57, 0x00000000, /* tex0 */
+ MGA_WR53, 0x00000000, /* tex1 */
+ MGA_WR61, 0x00000000 ); /* tex1 */
+
+ DMA_BLOCK( MGA_WR54, MGA_G400_WR_MAGIC, /* tex0 width */
+ MGA_WR62, MGA_G400_WR_MAGIC, /* tex0 height */
+ MGA_WR52, MGA_G400_WR_MAGIC, /* tex1 width */
+ MGA_WR60, MGA_G400_WR_MAGIC ); /* tex1 height */
+
+	/* Padding required due to hardware bug */
+ DMA_BLOCK( MGA_DMAPAD, 0xffffffff,
+ MGA_DMAPAD, 0xffffffff,
+ MGA_DMAPAD, 0xffffffff,
+ MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] |
+ MGA_WMODE_START |
+ MGA_WAGP_ENABLE) );
+
+ ADVANCE_DMA();
}
-static void mgaG200EmitPipe(drm_mga_private_t * dev_priv)
+static void mga_g200_emit_state( drm_mga_private_t *dev_priv )
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned int pipe = sarea_priv->WarpPipe;
- PRIMLOCALS;
- DRM_DEBUG("%s\n", __FUNCTION__);
-
- PRIMGETPTR(dev_priv);
-
- /* This takes 15 dwords */
-
- PRIMOUTREG(MGAREG_WIADDR, WIA_wmode_suspend);
- PRIMOUTREG(MGAREG_WVRTXSZ, 7);
- PRIMOUTREG(MGAREG_WFLAG, 0);
- PRIMOUTREG(MGAREG_WR24, 0); /* tex w/h */
+ unsigned int dirty = sarea_priv->dirty;
- PRIMOUTREG(MGAREG_WR25, 0x100);
- PRIMOUTREG(MGAREG_WR34, 0); /* tex w/h */
- PRIMOUTREG(MGAREG_WR42, 0xFFFF);
- PRIMOUTREG(MGAREG_WR60, 0xFFFF);
+ if ( sarea_priv->warp_pipe != dev_priv->warp_pipe ) {
+ mga_g200_emit_pipe( dev_priv );
+ dev_priv->warp_pipe = sarea_priv->warp_pipe;
+ }
- /* Dma pading required due to hw bug */
- PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
- PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
- PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
- PRIMOUTREG(MGAREG_WIADDR,
- (u32) (dev_priv->WarpIndex[pipe].
- phys_addr | WIA_wmode_start | WIA_wagp_agp));
+ if ( dirty & MGA_UPLOAD_CONTEXT ) {
+ mga_g200_emit_context( dev_priv );
+ sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT;
+ }
- PRIMADVANCE(dev_priv);
+ if ( dirty & MGA_UPLOAD_TEX0 ) {
+ mga_g200_emit_tex0( dev_priv );
+ sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;
+ }
}
-static void mgaEmitState(drm_mga_private_t * dev_priv)
+static void mga_g400_emit_state( drm_mga_private_t *dev_priv )
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
- DRM_DEBUG("%s\n", __FUNCTION__);
-
- if (dev_priv->chipset == MGA_CARD_TYPE_G400) {
- int multitex = sarea_priv->WarpPipe & MGA_T2;
-
- if (sarea_priv->WarpPipe != dev_priv->WarpPipe) {
- mgaG400EmitPipe(dev_priv);
- dev_priv->WarpPipe = sarea_priv->WarpPipe;
- }
-
- if (dirty & MGA_UPLOAD_CTX) {
- mgaEmitContext(dev_priv);
- sarea_priv->dirty &= ~MGA_UPLOAD_CTX;
- }
+ int multitex = sarea_priv->warp_pipe & MGA_T2;
- if (dirty & MGA_UPLOAD_TEX0) {
- mgaG400EmitTex0(dev_priv);
- sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;
- }
+ if ( sarea_priv->warp_pipe != dev_priv->warp_pipe ) {
+ mga_g400_emit_pipe( dev_priv );
+ dev_priv->warp_pipe = sarea_priv->warp_pipe;
+ }
- if ((dirty & MGA_UPLOAD_TEX1) && multitex) {
- mgaG400EmitTex1(dev_priv);
- sarea_priv->dirty &= ~MGA_UPLOAD_TEX1;
- }
- } else {
- if (sarea_priv->WarpPipe != dev_priv->WarpPipe) {
- mgaG200EmitPipe(dev_priv);
- dev_priv->WarpPipe = sarea_priv->WarpPipe;
- }
+ if ( dirty & MGA_UPLOAD_CONTEXT ) {
+ mga_g400_emit_context( dev_priv );
+ sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT;
+ }
- if (dirty & MGA_UPLOAD_CTX) {
- mgaEmitContext(dev_priv);
- sarea_priv->dirty &= ~MGA_UPLOAD_CTX;
- }
+ if ( dirty & MGA_UPLOAD_TEX0 ) {
+ mga_g400_emit_tex0( dev_priv );
+ sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;
+ }
- if (dirty & MGA_UPLOAD_TEX0) {
- mgaG200EmitTex(dev_priv);
- sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;
- }
+ if ( (dirty & MGA_UPLOAD_TEX1) && multitex ) {
+ mga_g400_emit_tex1( dev_priv );
+ sarea_priv->dirty &= ~MGA_UPLOAD_TEX1;
}
}
+/* ================================================================
+ * SAREA state verification
+ */
+
/* Disallow all write destinations except the front and backbuffer.
*/
-static int mgaVerifyContext(drm_mga_private_t * dev_priv)
+static int mga_verify_context( drm_mga_private_t *dev_priv )
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned int *regs = sarea_priv->ContextState;
-
- DRM_DEBUG("%s\n", __FUNCTION__);
-
- if (regs[MGA_CTXREG_DSTORG] != dev_priv->frontOffset &&
- regs[MGA_CTXREG_DSTORG] != dev_priv->backOffset) {
- DRM_DEBUG("BAD DSTORG: %x (front %x, back %x)\n\n",
- regs[MGA_CTXREG_DSTORG], dev_priv->frontOffset,
- dev_priv->backOffset);
- regs[MGA_CTXREG_DSTORG] = 0;
- return -1;
+ drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
+
+ if ( ctx->dstorg != dev_priv->front_offset &&
+ ctx->dstorg != dev_priv->back_offset ) {
+ DRM_ERROR( "*** bad DSTORG: %x (front %x, back %x)\n\n",
+ ctx->dstorg, dev_priv->front_offset,
+ dev_priv->back_offset );
+ ctx->dstorg = 0;
+ DRM_OS_RETURN( EINVAL );
}
return 0;
@@ -443,667 +420,647 @@ static int mgaVerifyContext(drm_mga_private_t * dev_priv)
/* Disallow texture reads from PCI space.
*/
-static int mgaVerifyTex(drm_mga_private_t * dev_priv, int unit)
+static int mga_verify_tex( drm_mga_private_t *dev_priv, int unit )
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[unit];
+ unsigned int org;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ org = tex->texorg & (MGA_TEXORGMAP_MASK | MGA_TEXORGACC_MASK);
- if ((sarea_priv->TexState[unit][MGA_TEXREG_ORG] & 0x3) == 0x1) {
- DRM_DEBUG("BAD TEXREG_ORG: %x, unit %d\n",
- sarea_priv->TexState[unit][MGA_TEXREG_ORG],
- unit);
- sarea_priv->TexState[unit][MGA_TEXREG_ORG] = 0;
- return -1;
+ if ( org == (MGA_TEXORGMAP_SYSMEM | MGA_TEXORGACC_PCI) ) {
+ DRM_ERROR( "*** bad TEXORG: 0x%x, unit %d\n",
+ tex->texorg, unit );
+ tex->texorg = 0;
+ DRM_OS_RETURN( EINVAL );
}
return 0;
}
-static int mgaVerifyState(drm_mga_private_t * dev_priv)
+static int mga_verify_state( drm_mga_private_t *dev_priv )
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
- int rv = 0;
-
- DRM_DEBUG("%s\n", __FUNCTION__);
+ int ret = 0;
- if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
+ if ( sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS )
sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
- if (dirty & MGA_UPLOAD_CTX)
- rv |= mgaVerifyContext(dev_priv);
+ if ( dirty & MGA_UPLOAD_CONTEXT )
+ ret |= mga_verify_context( dev_priv );
- if (dirty & MGA_UPLOAD_TEX0)
- rv |= mgaVerifyTex(dev_priv, 0);
+ if ( dirty & MGA_UPLOAD_TEX0 )
+ ret |= mga_verify_tex( dev_priv, 0 );
- if (dev_priv->chipset == MGA_CARD_TYPE_G400) {
- if (dirty & MGA_UPLOAD_TEX1)
- rv |= mgaVerifyTex(dev_priv, 1);
+ if ( dev_priv->chipset == MGA_CARD_TYPE_G400 ) {
+ if ( dirty & MGA_UPLOAD_TEX1 )
+ ret |= mga_verify_tex( dev_priv, 1 );
- if (dirty & MGA_UPLOAD_PIPE)
- rv |= (sarea_priv->WarpPipe > MGA_MAX_G400_PIPES);
+ if ( dirty & MGA_UPLOAD_PIPE )
+ ret |= ( sarea_priv->warp_pipe > MGA_MAX_G400_PIPES );
} else {
- if (dirty & MGA_UPLOAD_PIPE)
- rv |= (sarea_priv->WarpPipe > MGA_MAX_G200_PIPES);
+ if ( dirty & MGA_UPLOAD_PIPE )
+ ret |= ( sarea_priv->warp_pipe > MGA_MAX_G200_PIPES );
}
- return rv == 0;
+ return ( ret == 0 );
}
-static int mgaVerifyIload(drm_mga_private_t * dev_priv,
- unsigned long bus_address,
- unsigned int dstOrg, int length)
+static int mga_verify_iload( drm_mga_private_t *dev_priv,
+ unsigned int dstorg, unsigned int length )
{
- DRM_DEBUG("%s\n", __FUNCTION__);
+ if ( dstorg < dev_priv->texture_offset ||
+ dstorg + length > (dev_priv->texture_offset +
+ dev_priv->texture_size) ) {
+ DRM_ERROR( "*** bad iload DSTORG: 0x%x\n", dstorg );
+ DRM_OS_RETURN( EINVAL );
+ }
- if (dstOrg < dev_priv->textureOffset ||
- dstOrg + length >
- (dev_priv->textureOffset + dev_priv->textureSize)) {
- return EINVAL;
+ if ( length & MGA_ILOAD_MASK ) {
+ DRM_ERROR( "*** bad iload length: 0x%x\n",
+ length & MGA_ILOAD_MASK );
+ DRM_OS_RETURN( EINVAL );
}
- if (length % 64) {
- return EINVAL;
+
+ return 0;
+}
+
+static int mga_verify_blit( drm_mga_private_t *dev_priv,
+ unsigned int srcorg, unsigned int dstorg )
+{
+ if ( (srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) ||
+ (dstorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) ) {
+ DRM_ERROR( "*** bad blit: src=0x%x dst=0x%x\n",
+ srcorg, dstorg );
+ DRM_OS_RETURN( EINVAL );
}
return 0;
}
-/* This copies a 64 byte aligned agp region to the frambuffer
- * with a standard blit, the ioctl needs to do checking */
-static void mga_dma_dispatch_tex_blit(drm_device_t * dev,
- unsigned long bus_address,
- int length, unsigned int destOrg)
+/* ================================================================
+ *
+ */
+
+static void mga_dma_dispatch_clear( drm_device_t *dev,
+ drm_mga_clear_t *clear )
{
drm_mga_private_t *dev_priv = dev->dev_private;
- int use_agp = PDEA_pagpxfer_enable | 0x00000001;
- u16 y2;
- PRIMLOCALS;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
+ drm_clip_rect_t *pbox = sarea_priv->boxes;
+ int nbox = sarea_priv->nbox;
+ int i;
+ DMA_LOCALS;
+ DRM_DEBUG( __FUNCTION__ ":\n" );
- y2 = length / 64;
+ BEGIN_DMA( 1 );
- PRIM_OVERFLOW(dev, dev_priv, 30);
+ DMA_BLOCK( MGA_DMAPAD, 0x00000000,
+ MGA_DMAPAD, 0x00000000,
+ MGA_DWGSYNC, 0x00007100,
+ MGA_DWGSYNC, 0x00007000 );
- PRIMOUTREG(MGAREG_DSTORG, destOrg);
- PRIMOUTREG(MGAREG_MACCESS, 0x00000000);
- DRM_DEBUG("srcorg : %lx\n", bus_address | use_agp);
- PRIMOUTREG(MGAREG_SRCORG, (u32) bus_address | use_agp);
- PRIMOUTREG(MGAREG_AR5, 64);
+ ADVANCE_DMA();
- PRIMOUTREG(MGAREG_PITCH, 64);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DWGCTL, MGA_COPY_CMD);
+ for ( i = 0 ; i < nbox ; i++ ) {
+ drm_clip_rect_t *box = &pbox[i];
+ u32 height = box->y2 - box->y1;
- PRIMOUTREG(MGAREG_AR0, 63);
- PRIMOUTREG(MGAREG_AR3, 0);
- PRIMOUTREG(MGAREG_FXBNDRY, (63 << 16));
- PRIMOUTREG(MGAREG_YDSTLEN + MGAREG_MGA_EXEC, y2);
+ DRM_DEBUG( " from=%d,%d to=%d,%d\n",
+ box->x1, box->y1, box->x2, box->y2 );
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_SRCORG, 0);
- PRIMOUTREG(MGAREG_PITCH, dev_priv->stride / dev_priv->cpp);
- PRIMOUTREG(MGAREG_DWGSYNC, 0x7000);
+ if ( clear->flags & MGA_FRONT ) {
+ BEGIN_DMA( 2 );
- PRIMADVANCE(dev_priv);
-}
+ DMA_BLOCK( MGA_DMAPAD, 0x00000000,
+ MGA_PLNWT, clear->color_mask,
+ MGA_YDSTLEN, (box->y1 << 16) | height,
+ MGA_FXBNDRY, (box->x2 << 16) | box->x1 );
-static void mga_dma_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf)
-{
- drm_mga_private_t *dev_priv = dev->dev_private;
- drm_mga_buf_priv_t *buf_priv = buf->dev_private;
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned long address = (unsigned long) buf->bus_address;
- int length = buf->used;
- int use_agp = PDEA_pagpxfer_enable;
- int i = 0;
- PRIMLOCALS;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DMA_BLOCK( MGA_DMAPAD, 0x00000000,
+ MGA_FCOL, clear->clear_color,
+ MGA_DSTORG, dev_priv->front_offset,
+ MGA_DWGCTL + MGA_EXEC,
+ dev_priv->clear_cmd );
- DRM_DEBUG("dispatch vertex %d addr 0x%lx, "
- "length 0x%x nbox %d dirty %x\n",
- buf->idx, address, length,
- sarea_priv->nbox, sarea_priv->dirty);
+ ADVANCE_DMA();
+ }
- DRM_DEBUG("used : %d, total : %d\n", buf->used, buf->total);
- if (buf->used) {
- /* WARNING: if you change any of the state functions verify
- * these numbers (Overestimating this doesn't hurt).
- */
- buf_priv->dispatched = 1;
- PRIM_OVERFLOW(dev, dev_priv,
- (MAX_STATE_SIZE + (5 * MGA_NR_SAREA_CLIPRECTS)));
- mgaEmitState(dev_priv);
+ if ( clear->flags & MGA_BACK ) {
+ BEGIN_DMA( 2 );
-#if 0
- length = dev_priv->vertexsize * 3 * 4;
-#endif
+ DMA_BLOCK( MGA_DMAPAD, 0x00000000,
+ MGA_PLNWT, clear->color_mask,
+ MGA_YDSTLEN, (box->y1 << 16) | height,
+ MGA_FXBNDRY, (box->x2 << 16) | box->x1 );
+ DMA_BLOCK( MGA_DMAPAD, 0x00000000,
+ MGA_FCOL, clear->clear_color,
+ MGA_DSTORG, dev_priv->back_offset,
+ MGA_DWGCTL + MGA_EXEC,
+ dev_priv->clear_cmd );
+ ADVANCE_DMA();
+ }
- do {
- if (i < sarea_priv->nbox) {
- DRM_DEBUG("idx %d Emit box %d/%d:"
- "%d,%d - %d,%d\n",
- buf->idx,
- i, sarea_priv->nbox,
- sarea_priv->boxes[i].x1,
- sarea_priv->boxes[i].y1,
- sarea_priv->boxes[i].x2,
- sarea_priv->boxes[i].y2);
-
- mgaEmitClipRect(dev_priv,
- &sarea_priv->boxes[i]);
- }
+ if ( clear->flags & MGA_DEPTH ) {
+ BEGIN_DMA( 2 );
- PRIMGETPTR(dev_priv);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_SECADDRESS,
- ((u32) address) | TT_VERTEX);
- PRIMOUTREG(MGAREG_SECEND,
- (((u32) (address + length)) | use_agp));
- PRIMADVANCE(dev_priv);
- } while (++i < sarea_priv->nbox);
- }
+ DMA_BLOCK( MGA_DMAPAD, 0x00000000,
+ MGA_PLNWT, clear->depth_mask,
+ MGA_YDSTLEN, (box->y1 << 16) | height,
+ MGA_FXBNDRY, (box->x2 << 16) | box->x1 );
+
+ DMA_BLOCK( MGA_DMAPAD, 0x00000000,
+ MGA_FCOL, clear->clear_depth,
+ MGA_DSTORG, dev_priv->depth_offset,
+ MGA_DWGCTL + MGA_EXEC,
+ dev_priv->clear_cmd );
+
+ ADVANCE_DMA();
+ }
- if (buf_priv->discard) {
- if (buf_priv->dispatched == 1)
- AGEBUF(dev_priv, buf_priv);
- buf_priv->dispatched = 0;
- mga_freelist_put(dev, buf);
}
+ BEGIN_DMA( 1 );
+
+ /* Force reset of DWGCTL */
+ DMA_BLOCK( MGA_DMAPAD, 0x00000000,
+ MGA_DMAPAD, 0x00000000,
+ MGA_PLNWT, ctx->plnwt,
+ MGA_DWGCTL, ctx->dwgctl );
+
+ ADVANCE_DMA();
+ FLUSH_DMA();
}
+static void mga_dma_dispatch_swap( drm_device_t *dev )
+{
+ drm_mga_private_t *dev_priv = dev->dev_private;
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
+ drm_clip_rect_t *pbox = sarea_priv->boxes;
+ int nbox = sarea_priv->nbox;
+ int i;
+ DMA_LOCALS;
+ DRM_DEBUG( __FUNCTION__ ":\n" );
+
+ sarea_priv->last_frame.head = dev_priv->prim.tail;
+ sarea_priv->last_frame.wrap = dev_priv->prim.last_wrap;
+
+ BEGIN_DMA( 4 + nbox );
+
+ DMA_BLOCK( MGA_DMAPAD, 0x00000000,
+ MGA_DMAPAD, 0x00000000,
+ MGA_DWGSYNC, 0x00007100,
+ MGA_DWGSYNC, 0x00007000 );
+
+ DMA_BLOCK( MGA_DSTORG, dev_priv->front_offset,
+ MGA_MACCESS, dev_priv->maccess,
+ MGA_SRCORG, dev_priv->back_offset,
+ MGA_AR5, dev_priv->front_pitch );
+
+ DMA_BLOCK( MGA_DMAPAD, 0x00000000,
+ MGA_DMAPAD, 0x00000000,
+ MGA_PLNWT, 0xffffffff,
+ MGA_DWGCTL, MGA_DWGCTL_COPY );
+
+ for ( i = 0 ; i < nbox ; i++ ) {
+ drm_clip_rect_t *box = &pbox[i];
+ u32 height = box->y2 - box->y1;
+ u32 start = box->y1 * dev_priv->front_pitch;
+
+ DRM_DEBUG( " from=%d,%d to=%d,%d\n",
+ box->x1, box->y1, box->x2, box->y2 );
+
+ DMA_BLOCK( MGA_AR0, start + box->x2 - 1,
+ MGA_AR3, start + box->x1,
+ MGA_FXBNDRY, ((box->x2 - 1) << 16) | box->x1,
+ MGA_YDSTLEN + MGA_EXEC,
+ (box->y1 << 16) | height );
+ }
-static void mga_dma_dispatch_indices(drm_device_t * dev,
- drm_buf_t * buf,
- unsigned int start, unsigned int end)
+ DMA_BLOCK( MGA_DMAPAD, 0x00000000,
+ MGA_PLNWT, ctx->plnwt,
+ MGA_SRCORG, dev_priv->front_offset,
+ MGA_DWGCTL, ctx->dwgctl );
+
+ ADVANCE_DMA();
+
+ FLUSH_DMA();
+
+ DRM_DEBUG( "%s... done.\n", __FUNCTION__ );
+}
+
+static void mga_dma_dispatch_vertex( drm_device_t *dev, drm_buf_t *buf )
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_buf_priv_t *buf_priv = buf->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned int address = (unsigned int) buf->bus_address;
- int use_agp = PDEA_pagpxfer_enable;
+ u32 address = (u32) buf->bus_address;
+ u32 length = (u32) buf->used;
int i = 0;
- PRIMLOCALS;
- DRM_DEBUG("%s\n", __FUNCTION__);
-
- DRM_DEBUG("dispatch indices %d addr 0x%x, "
- "start 0x%x end 0x%x nbox %d dirty %x\n",
- buf->idx, address, start, end,
- sarea_priv->nbox, sarea_priv->dirty);
-
- if (start != end) {
- /* WARNING: if you change any of the state functions verify
- * these numbers (Overestimating this doesn't hurt).
- */
+ DMA_LOCALS;
+ DRM_DEBUG( "vertex: buf=%d used=%d\n", buf->idx, buf->used );
+
+ if ( buf->used ) {
buf_priv->dispatched = 1;
- PRIM_OVERFLOW(dev, dev_priv,
- (MAX_STATE_SIZE + (5 * MGA_NR_SAREA_CLIPRECTS)));
- mgaEmitState(dev_priv);
+ MGA_EMIT_STATE( dev_priv, sarea_priv->dirty );
do {
- if (i < sarea_priv->nbox) {
- DRM_DEBUG("idx %d Emit box %d/%d:"
- "%d,%d - %d,%d\n",
- buf->idx,
- i, sarea_priv->nbox,
- sarea_priv->boxes[i].x1,
- sarea_priv->boxes[i].y1,
- sarea_priv->boxes[i].x2,
- sarea_priv->boxes[i].y2);
-
- mgaEmitClipRect(dev_priv,
- &sarea_priv->boxes[i]);
+ if ( i < sarea_priv->nbox ) {
+ mga_emit_clip_rect( dev_priv,
+ &sarea_priv->boxes[i] );
}
- PRIMGETPTR(dev_priv);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_SETUPADDRESS,
- ((address + start) |
- SETADD_mode_vertlist));
- PRIMOUTREG(MGAREG_SETUPEND,
- ((address + end) | use_agp));
-
- PRIMADVANCE(dev_priv);
- } while (++i < sarea_priv->nbox);
+ BEGIN_DMA( 1 );
+
+ DMA_BLOCK( MGA_DMAPAD, 0x00000000,
+ MGA_DMAPAD, 0x00000000,
+ MGA_SECADDRESS, (address |
+ MGA_DMA_VERTEX),
+ MGA_SECEND, ((address + length) |
+ MGA_PAGPXFER) );
+
+ ADVANCE_DMA();
+ } while ( ++i < sarea_priv->nbox );
}
- if (buf_priv->discard) {
- if (buf_priv->dispatched == 1)
- AGEBUF(dev_priv, buf_priv);
+
+ if ( buf_priv->discard ) {
+ AGE_BUFFER( buf_priv );
+ buf->pending = 0;
+ buf->used = 0;
buf_priv->dispatched = 0;
- mga_freelist_put(dev, buf);
- }
-}
+ mga_freelist_put( dev, buf );
+ }
-static void mga_dma_dispatch_clear(drm_device_t * dev, int flags,
- unsigned int clear_color,
- unsigned int clear_zval,
- unsigned int clear_colormask,
- unsigned int clear_depthmask)
+ FLUSH_DMA();
+}
+static void mga_dma_dispatch_indices( drm_device_t *dev, drm_buf_t *buf,
+ unsigned int start, unsigned int end )
{
drm_mga_private_t *dev_priv = dev->dev_private;
+ drm_mga_buf_priv_t *buf_priv = buf->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned int *regs = sarea_priv->ContextState;
- int nbox = sarea_priv->nbox;
- drm_clip_rect_t *pbox = sarea_priv->boxes;
- unsigned int cmd;
- int i;
- PRIMLOCALS;
- DRM_DEBUG("%s\n", __FUNCTION__);
-
- if (dev_priv->sgram)
- cmd = MGA_CLEAR_CMD | DC_atype_blk;
- else
- cmd = MGA_CLEAR_CMD | DC_atype_rstr;
-
- PRIM_OVERFLOW(dev, dev_priv, 35 * MGA_NR_SAREA_CLIPRECTS);
-
- for (i = 0; i < nbox; i++) {
- unsigned int height = pbox[i].y2 - pbox[i].y1;
-
- DRM_DEBUG("dispatch clear %d,%d-%d,%d flags %x!\n",
- pbox[i].x1, pbox[i].y1, pbox[i].x2,
- pbox[i].y2, flags);
-
- if (flags & MGA_FRONT) {
- DRM_DEBUG("clear front\n");
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_PLNWT, clear_colormask);
- PRIMOUTREG(MGAREG_YDSTLEN,
- (pbox[i].y1 << 16) | height);
- PRIMOUTREG(MGAREG_FXBNDRY,
- (pbox[i].x2 << 16) | pbox[i].x1);
-
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_FCOL, clear_color);
- PRIMOUTREG(MGAREG_DSTORG, dev_priv->frontOffset);
- PRIMOUTREG(MGAREG_DWGCTL + MGAREG_MGA_EXEC, cmd);
- }
+ u32 address = (u32) buf->bus_address;
+ int i = 0;
+ DMA_LOCALS;
+ DRM_DEBUG( "indices: buf=%d start=%d end=%d\n", buf->idx, start, end );
- if (flags & MGA_BACK) {
- DRM_DEBUG("clear back\n");
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_PLNWT, clear_colormask);
- PRIMOUTREG(MGAREG_YDSTLEN,
- (pbox[i].y1 << 16) | height);
- PRIMOUTREG(MGAREG_FXBNDRY,
- (pbox[i].x2 << 16) | pbox[i].x1);
-
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_FCOL, clear_color);
- PRIMOUTREG(MGAREG_DSTORG, dev_priv->backOffset);
- PRIMOUTREG(MGAREG_DWGCTL + MGAREG_MGA_EXEC, cmd);
- }
+ if ( start != end ) {
+ buf_priv->dispatched = 1;
- if (flags & MGA_DEPTH) {
- DRM_DEBUG("clear depth\n");
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_PLNWT, clear_depthmask);
- PRIMOUTREG(MGAREG_YDSTLEN,
- (pbox[i].y1 << 16) | height);
- PRIMOUTREG(MGAREG_FXBNDRY,
- (pbox[i].x2 << 16) | pbox[i].x1);
-
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_FCOL, clear_zval);
- PRIMOUTREG(MGAREG_DSTORG, dev_priv->depthOffset);
- PRIMOUTREG(MGAREG_DWGCTL + MGAREG_MGA_EXEC, cmd);
- }
+ MGA_EMIT_STATE( dev_priv, sarea_priv->dirty );
+
+ do {
+ if ( i < sarea_priv->nbox ) {
+ mga_emit_clip_rect( dev_priv,
+ &sarea_priv->boxes[i] );
+ }
+
+ BEGIN_DMA( 1 );
+
+ DMA_BLOCK( MGA_DMAPAD, 0x00000000,
+ MGA_DMAPAD, 0x00000000,
+ MGA_SETUPADDRESS, address + start,
+ MGA_SETUPEND, ((address + end) |
+ MGA_PAGPXFER) );
+
+ ADVANCE_DMA();
+ } while ( ++i < sarea_priv->nbox );
}
- /* Force reset of DWGCTL */
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL]);
- PRIMADVANCE(dev_priv);
+ if ( buf_priv->discard ) {
+ AGE_BUFFER( buf_priv );
+ buf->pending = 0;
+ buf->used = 0;
+ buf_priv->dispatched = 0;
+
+ mga_freelist_put( dev, buf );
+ }
+
+ FLUSH_DMA();
}
-static void mga_dma_dispatch_swap(drm_device_t * dev)
+/* This copies a 64-byte aligned AGP region to the framebuffer with a
+ * standard blit; the ioctl needs to do the checking.
+ */
+static void mga_dma_dispatch_iload( drm_device_t *dev, drm_buf_t *buf,
+ unsigned int dstorg, unsigned int length )
{
drm_mga_private_t *dev_priv = dev->dev_private;
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned int *regs = sarea_priv->ContextState;
- int nbox = sarea_priv->nbox;
- drm_clip_rect_t *pbox = sarea_priv->boxes;
- int i;
- int pixel_stride = dev_priv->stride / dev_priv->cpp;
+ drm_mga_buf_priv_t *buf_priv = buf->dev_private;
+ drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state;
+ u32 srcorg = buf->bus_address | MGA_SRCACC_AGP | MGA_SRCMAP_SYSMEM;
+ u32 y2;
+ DMA_LOCALS;
+ DRM_DEBUG( "%s: buf=%d used=%d\n",
+ __FUNCTION__, buf->idx, buf->used );
- PRIMLOCALS;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ y2 = length / 64;
- PRIM_OVERFLOW(dev, dev_priv, (MGA_NR_SAREA_CLIPRECTS * 5) + 20);
+ BEGIN_DMA( 5 );
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DWGSYNC, 0x7100);
- PRIMOUTREG(MGAREG_DWGSYNC, 0x7000);
+ DMA_BLOCK( MGA_DMAPAD, 0x00000000,
+ MGA_DMAPAD, 0x00000000,
+ MGA_DWGSYNC, 0x00007100,
+ MGA_DWGSYNC, 0x00007000 );
- PRIMOUTREG(MGAREG_DSTORG, dev_priv->frontOffset);
- PRIMOUTREG(MGAREG_MACCESS, dev_priv->mAccess);
- PRIMOUTREG(MGAREG_SRCORG, dev_priv->backOffset);
- PRIMOUTREG(MGAREG_AR5, pixel_stride);
+ DMA_BLOCK( MGA_DSTORG, dstorg,
+ MGA_MACCESS, 0x00000000,
+ MGA_SRCORG, srcorg,
+ MGA_AR5, 64 );
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DWGCTL, MGA_COPY_CMD);
+ DMA_BLOCK( MGA_PITCH, 64,
+ MGA_PLNWT, 0xffffffff,
+ MGA_DMAPAD, 0x00000000,
+ MGA_DWGCTL, MGA_DWGCTL_COPY );
+ DMA_BLOCK( MGA_AR0, 63,
+ MGA_AR3, 0,
+ MGA_FXBNDRY, (63 << 16) | 0,
+ MGA_YDSTLEN + MGA_EXEC, y2 );
- for (i = 0; i < nbox; i++) {
- unsigned int h = pbox[i].y2 - pbox[i].y1;
- unsigned int start = pbox[i].y1 * pixel_stride;
+ DMA_BLOCK( MGA_PLNWT, ctx->plnwt,
+ MGA_SRCORG, dev_priv->front_offset,
+ MGA_PITCH, dev_priv->front_pitch,
+ MGA_DWGSYNC, 0x00007000 );
- PRIMOUTREG(MGAREG_AR0, start + pbox[i].x2 - 1);
- PRIMOUTREG(MGAREG_AR3, start + pbox[i].x1);
- PRIMOUTREG(MGAREG_FXBNDRY,
- pbox[i].x1 | ((pbox[i].x2 - 1) << 16));
- PRIMOUTREG(MGAREG_YDSTLEN + MGAREG_MGA_EXEC,
- (pbox[i].y1 << 16) | h);
- }
+ ADVANCE_DMA();
- /* Force reset of DWGCTL */
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_DMAPAD, 0);
- PRIMOUTREG(MGAREG_SRCORG, 0);
- PRIMOUTREG(MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL]);
+ AGE_BUFFER( buf_priv );
+
+ buf->pending = 0;
+ buf->used = 0;
+ buf_priv->dispatched = 0;
- PRIMADVANCE(dev_priv);
+ mga_freelist_put( dev, buf );
+
+ FLUSH_DMA();
}
-int mga_clear_bufs(dev_t kdev, u_long cmd, caddr_t data,
- int flags, struct proc *p)
+static void mga_dma_dispatch_blit( drm_device_t *dev,
+ drm_mga_blit_t *blit )
{
- drm_device_t *dev = kdev->si_drv1;
- drm_mga_private_t *dev_priv =
- (drm_mga_private_t *) dev->dev_private;
+ drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_mga_clear_t clear;
- int s;
+ drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
+ drm_clip_rect_t *pbox = sarea_priv->boxes;
+ int nbox = sarea_priv->nbox;
+ u32 scandir = 0, i;
+ DMA_LOCALS;
+ DRM_DEBUG( __FUNCTION__ ":\n" );
+
+ BEGIN_DMA( 4 + nbox );
+
+ DMA_BLOCK( MGA_DMAPAD, 0x00000000,
+ MGA_DMAPAD, 0x00000000,
+ MGA_DWGSYNC, 0x00007100,
+ MGA_DWGSYNC, 0x00007000 );
+
+ DMA_BLOCK( MGA_DWGCTL, MGA_DWGCTL_COPY,
+ MGA_PLNWT, blit->planemask,
+ MGA_SRCORG, blit->srcorg,
+ MGA_DSTORG, blit->dstorg );
+
+ DMA_BLOCK( MGA_SGN, scandir,
+ MGA_MACCESS, dev_priv->maccess,
+ MGA_AR5, blit->ydir * blit->src_pitch,
+ MGA_PITCH, blit->dst_pitch );
+
+ for ( i = 0 ; i < nbox ; i++ ) {
+ int srcx = pbox[i].x1 + blit->delta_sx;
+ int srcy = pbox[i].y1 + blit->delta_sy;
+ int dstx = pbox[i].x1 + blit->delta_dx;
+ int dsty = pbox[i].y1 + blit->delta_dy;
+ int h = pbox[i].y2 - pbox[i].y1;
+ int w = pbox[i].x2 - pbox[i].x1 - 1;
+ int start;
+
+ if ( blit->ydir == -1 ) {
+ srcy = blit->height - srcy - 1;
+ }
- clear = *(drm_mga_clear_t *) data;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ start = srcy * blit->src_pitch + srcx;
- if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
- DRM_ERROR("mga_clear_bufs called without lock held\n");
- return EINVAL;
+ DMA_BLOCK( MGA_AR0, start + w,
+ MGA_AR3, start,
+ MGA_FXBNDRY, ((dstx + w) << 16) | (dstx & 0xffff),
+ MGA_YDSTLEN + MGA_EXEC, (dsty << 16) | h );
}
- if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
- sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
-
- /* Make sure we restore the 3D state next time.
+ /* Do something to flush AGP?
*/
- dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CTX;
- mga_dma_dispatch_clear(dev, clear.flags,
- clear.clear_color,
- clear.clear_depth,
- clear.clear_color_mask,
- clear.clear_depth_mask);
- PRIMUPDATE(dev_priv);
- mga_flush_write_combine();
- s = splsofttq();
- mga_dma_schedule(dev, 1);
- splx(s);
- return 0;
+
+ /* Force reset of DWGCTL */
+ DMA_BLOCK( MGA_DMAPAD, 0x00000000,
+ MGA_PLNWT, ctx->plnwt,
+ MGA_PITCH, dev_priv->front_pitch,
+ MGA_DWGCTL, ctx->dwgctl );
+
+ ADVANCE_DMA();
}
-int mga_swap_bufs(dev_t kdev, u_long cmd, caddr_t data,
- int flags, struct proc *p)
+
+/* ================================================================
+ *
+ */
+
+int mga_dma_clear( DRM_OS_IOCTL )
{
- drm_device_t *dev = kdev->si_drv1;
- drm_mga_private_t *dev_priv =
- (drm_mga_private_t *) dev->dev_private;
+ DRM_OS_DEVICE;
+ drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- int s;
+ drm_mga_clear_t clear;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ LOCK_TEST_WITH_RETURN( dev );
- if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
- DRM_ERROR("mga_swap_bufs called without lock held\n");
- return EINVAL;
- }
+ DRM_OS_KRNFROMUSR( clear, (drm_mga_clear_t *) data, sizeof(clear) );
- if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
+ if ( sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS )
sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
+ WRAP_TEST_WITH_RETURN( dev_priv );
+
+ mga_dma_dispatch_clear( dev, &clear );
+
/* Make sure we restore the 3D state next time.
*/
- dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CTX;
- mga_dma_dispatch_swap(dev);
- PRIMUPDATE(dev_priv);
- set_bit(MGA_BUF_SWAP_PENDING,
- &dev_priv->current_prim->buffer_status);
- mga_flush_write_combine();
- s = splsofttq();
- mga_dma_schedule(dev, 1);
- splx(s);
+ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
+
return 0;
}
-int mga_iload(dev_t kdev, u_long cmd, caddr_t data,
- int flags, struct proc *p)
+int mga_dma_swap( DRM_OS_IOCTL )
{
- drm_device_t *dev = kdev->si_drv1;
- drm_device_dma_t *dma = dev->dma;
- drm_mga_private_t *dev_priv =
- (drm_mga_private_t *) dev->dev_private;
+ DRM_OS_DEVICE;
+ drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_buf_t *buf;
- drm_mga_buf_priv_t *buf_priv;
- drm_mga_iload_t iload;
- unsigned long bus_address;
- int s;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ LOCK_TEST_WITH_RETURN( dev );
- DRM_DEBUG("Starting Iload\n");
- iload = *(drm_mga_iload_t *) data;
+ if ( sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS )
+ sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
- if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
- DRM_ERROR("mga_iload called without lock held\n");
- return EINVAL;
- }
+ WRAP_TEST_WITH_RETURN( dev_priv );
- buf = dma->buflist[iload.idx];
- buf_priv = buf->dev_private;
- bus_address = buf->bus_address;
- DRM_DEBUG("bus_address %lx, length %d, destorg : %x\n",
- bus_address, iload.length, iload.destOrg);
-
- if (mgaVerifyIload(dev_priv,
- bus_address, iload.destOrg, iload.length)) {
- mga_freelist_put(dev, buf);
- return EINVAL;
- }
+ mga_dma_dispatch_swap( dev );
+
+ /* Make sure we restore the 3D state next time.
+ */
+ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
- sarea_priv->dirty |= MGA_UPLOAD_CTX;
-
- mga_dma_dispatch_tex_blit(dev, bus_address, iload.length,
- iload.destOrg);
- AGEBUF(dev_priv, buf_priv);
- buf_priv->discard = 1;
- mga_freelist_put(dev, buf);
- mga_flush_write_combine();
- s = splsofttq();
- mga_dma_schedule(dev, 1);
- splx(s);
return 0;
}
-int mga_vertex(dev_t kdev, u_long cmd, caddr_t data,
- int flags, struct proc *p)
+int mga_dma_vertex( DRM_OS_IOCTL )
{
- drm_device_t *dev = kdev->si_drv1;
- drm_mga_private_t *dev_priv =
- (drm_mga_private_t *) dev->dev_private;
+ DRM_OS_DEVICE;
+ drm_mga_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf;
drm_mga_buf_priv_t *buf_priv;
drm_mga_vertex_t vertex;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ LOCK_TEST_WITH_RETURN( dev );
- vertex = *(drm_mga_vertex_t *) data;
-
- if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
- DRM_ERROR("mga_vertex called without lock held\n");
- return EINVAL;
- }
-
- DRM_DEBUG("mga_vertex\n");
+ DRM_OS_KRNFROMUSR( vertex, (drm_mga_vertex_t *) data, sizeof(vertex) );
+ if(vertex.idx < 0 || vertex.idx > dma->buf_count) DRM_OS_RETURN( EINVAL );
buf = dma->buflist[vertex.idx];
buf_priv = buf->dev_private;
buf->used = vertex.used;
buf_priv->discard = vertex.discard;
- if (!mgaVerifyState(dev_priv)) {
- if (vertex.discard) {
- if (buf_priv->dispatched == 1)
- AGEBUF(dev_priv, buf_priv);
+ if ( !mga_verify_state( dev_priv ) ) {
+ if ( vertex.discard ) {
+ if ( buf_priv->dispatched == 1 )
+ AGE_BUFFER( buf_priv );
buf_priv->dispatched = 0;
- mga_freelist_put(dev, buf);
+ mga_freelist_put( dev, buf );
}
- DRM_DEBUG("bad state\n");
- return EINVAL;
+ DRM_OS_RETURN( EINVAL );
}
- mga_dma_dispatch_vertex(dev, buf);
+ WRAP_TEST_WITH_RETURN( dev_priv );
+
+ mga_dma_dispatch_vertex( dev, buf );
- PRIMUPDATE(dev_priv);
- mga_flush_write_combine();
- mga_dma_schedule(dev, 1);
return 0;
}
-
-int mga_indices(dev_t kdev, u_long cmd, caddr_t data,
- int flags, struct proc *p)
+int mga_dma_indices( DRM_OS_IOCTL )
{
- drm_device_t *dev = kdev->si_drv1;
- drm_mga_private_t *dev_priv =
- (drm_mga_private_t *) dev->dev_private;
+ DRM_OS_DEVICE;
+ drm_mga_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf;
drm_mga_buf_priv_t *buf_priv;
drm_mga_indices_t indices;
- DRM_DEBUG("%s\n", __FUNCTION__);
- indices = *(drm_mga_indices_t *) data;
+ LOCK_TEST_WITH_RETURN( dev );
- if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
- DRM_ERROR("mga_indices called without lock held\n");
- return EINVAL;
- }
+ DRM_OS_KRNFROMUSR( indices, (drm_mga_indices_t *) data, sizeof(indices) );
- DRM_DEBUG("mga_indices\n");
+ if(indices.idx < 0 || indices.idx > dma->buf_count) DRM_OS_RETURN( EINVAL );
buf = dma->buflist[indices.idx];
buf_priv = buf->dev_private;
buf_priv->discard = indices.discard;
- if (!mgaVerifyState(dev_priv)) {
- if (indices.discard) {
- if (buf_priv->dispatched == 1)
- AGEBUF(dev_priv, buf_priv);
+ if ( !mga_verify_state( dev_priv ) ) {
+ if ( indices.discard ) {
+ if ( buf_priv->dispatched == 1 )
+ AGE_BUFFER( buf_priv );
buf_priv->dispatched = 0;
- mga_freelist_put(dev, buf);
+ mga_freelist_put( dev, buf );
}
- return EINVAL;
+ DRM_OS_RETURN( EINVAL );
}
- mga_dma_dispatch_indices(dev, buf, indices.start, indices.end);
+ WRAP_TEST_WITH_RETURN( dev_priv );
+
+ mga_dma_dispatch_indices( dev, buf, indices.start, indices.end );
- PRIMUPDATE(dev_priv);
- mga_flush_write_combine();
- mga_dma_schedule(dev, 1);
return 0;
}
-
-
-static int
-mga_dma_get_buffers(drm_device_t * dev, drm_dma_t * d, struct proc *p)
+int mga_dma_iload( DRM_OS_IOCTL )
{
- int i, error;
+ DRM_OS_DEVICE;
+ drm_device_dma_t *dma = dev->dma;
+ drm_mga_private_t *dev_priv = dev->dev_private;
drm_buf_t *buf;
- DRM_DEBUG("%s\n", __FUNCTION__);
-
- for (i = d->granted_count; i < d->request_count; i++) {
- buf = mga_freelist_get(dev);
- if (!buf)
- break;
- buf->pid = p->p_pid;
- error = copyout(&buf->idx,
- &d->request_indices[i],
- sizeof(buf->idx));
- if (error) return error;
- error = copyout(&buf->total,
- &d->request_sizes[i],
- sizeof(buf->total));
- if (error) return error;
- ++d->granted_count;
+ drm_mga_buf_priv_t *buf_priv;
+ drm_mga_iload_t iload;
+ DRM_DEBUG( __FUNCTION__ ":\n" );
+
+ LOCK_TEST_WITH_RETURN( dev );
+
+ DRM_OS_KRNFROMUSR( iload, (drm_mga_iload_t *) data, sizeof(iload) );
+
+#if 0
+ if ( mga_do_wait_for_idle( dev_priv ) ) {
+ if ( MGA_DMA_DEBUG )
+ DRM_INFO( __FUNCTION__": -EBUSY\n" );
+ DRM_OS_RETURN( EBUSY );
}
+#endif
+ if(iload.idx < 0 || iload.idx > dma->buf_count) DRM_OS_RETURN( EINVAL );
+
+ buf = dma->buflist[iload.idx];
+ buf_priv = buf->dev_private;
+
+ if ( mga_verify_iload( dev_priv, iload.dstorg, iload.length ) ) {
+ mga_freelist_put( dev, buf );
+ DRM_OS_RETURN( EINVAL );
+ }
+
+ WRAP_TEST_WITH_RETURN( dev_priv );
+
+ mga_dma_dispatch_iload( dev, buf, iload.dstorg, iload.length );
+
+ /* Make sure we restore the 3D state next time.
+ */
+ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
+
return 0;
}
-int mga_dma(dev_t kdev, u_long cmd, caddr_t data,
- int flags, struct proc *p)
+int mga_dma_blit( DRM_OS_IOCTL )
{
- drm_device_t *dev = kdev->si_drv1;
- drm_device_dma_t *dma = dev->dma;
- int retcode = 0;
- drm_dma_t d;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_OS_DEVICE;
+ drm_mga_private_t *dev_priv = dev->dev_private;
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_mga_blit_t blit;
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
- d = *(drm_dma_t *) data;
- DRM_DEBUG("%d %d: %d send, %d req\n",
- p->p_pid, d.context, d.send_count, d.request_count);
+ LOCK_TEST_WITH_RETURN( dev );
- if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
- DRM_ERROR("mga_dma called without lock held\n");
- return EINVAL;
- }
+ DRM_OS_KRNFROMUSR( blit, (drm_mga_blit_t *) data, sizeof(blit) );
- /* Please don't send us buffers.
- */
- if (d.send_count != 0) {
- DRM_ERROR
- ("Process %d trying to send %d buffers via drmDMA\n",
- p->p_pid, d.send_count);
- return EINVAL;
- }
+ if ( sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS )
+ sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
- /* We'll send you buffers.
- */
- if (d.request_count < 0 || d.request_count > dma->buf_count) {
- DRM_ERROR
- ("Process %d trying to get %d buffers (of %d max)\n",
- p->p_pid, d.request_count, dma->buf_count);
- return EINVAL;
- }
+ if ( mga_verify_blit( dev_priv, blit.srcorg, blit.dstorg ) )
+ DRM_OS_RETURN( EINVAL );
- d.granted_count = 0;
+ WRAP_TEST_WITH_RETURN( dev_priv );
- if (d.request_count) {
- retcode = mga_dma_get_buffers(dev, &d, p);
- }
+ mga_dma_dispatch_blit( dev, &blit );
- DRM_DEBUG("%d returning, granted = %d\n",
- p->p_pid, d.granted_count);
- *(drm_dma_t *) data = d;
- return retcode;
+ /* Make sure we restore the 3D state next time.
+ */
+ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
+
+ return 0;
}
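The dispatch functions above repeatedly pack a clip rectangle into the MGA drawing registers: MGA_YDSTLEN takes (y1 << 16) | height, MGA_FXBNDRY takes the right edge in the high word and the left edge in the low word (the clear path passes x2 directly while the swap and blit paths pass the inclusive x2 - 1), and the screen-to-screen copies compute MGA_AR3/MGA_AR0 from start = y1 * pitch. A minimal stand-alone sketch of that packing follows; the helper names are illustrative, not part of the driver, and only the bit layouts come from the code in this diff.

#include <stdio.h>

typedef unsigned int u32;

struct clip_rect {
	unsigned short x1, y1;   /* upper-left corner, inclusive */
	unsigned short x2, y2;   /* lower-right corner, exclusive */
};

/* MGA_YDSTLEN: top scanline in the high word, scanline count in the low word. */
static u32 pack_ydstlen(const struct clip_rect *box)
{
	return ((u32)box->y1 << 16) | (u32)(box->y2 - box->y1);
}

/* MGA_FXBNDRY: right edge in the high word, left edge in the low word.
 * The clear path passes x2 directly; the swap/blit paths pass x2 - 1.
 */
static u32 pack_fxbndry(const struct clip_rect *box, int inclusive_right)
{
	u32 right = inclusive_right ? (u32)box->x2 - 1 : (u32)box->x2;

	return (right << 16) | (u32)box->x1;
}

/* MGA_AR3/MGA_AR0 for the screen-to-screen copy in the swap path:
 * AR3 addresses the first pixel of the span, AR0 its last pixel.
 */
static void pack_copy_span(const struct clip_rect *box, u32 pitch,
			   u32 *ar0, u32 *ar3)
{
	u32 start = (u32)box->y1 * pitch;

	*ar3 = start + box->x1;
	*ar0 = start + box->x2 - 1;
}

int main(void)
{
	struct clip_rect box = { 0, 0, 640, 480 };
	u32 ar0, ar3;

	pack_copy_span(&box, 640, &ar0, &ar3);
	printf("YDSTLEN=0x%08x FXBNDRY=0x%08x AR0=%u AR3=%u\n",
	       pack_ydstlen(&box), pack_fxbndry(&box, 1), ar0, ar3);
	return 0;
}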
diff --git a/bsd/mga_drm.h b/bsd/mga_drm.h
index eefa28d3..4af2ca2e 100644
--- a/bsd/mga_drm.h
+++ b/bsd/mga_drm.h
@@ -1,4 +1,4 @@
-/* mga_drm.h -- Public header for the Matrox g200/g400 driver
+/* mga_drm.h -- Public header for the Matrox g200/g400 driver -*- linux-c -*-
* Created: Tue Jan 25 01:50:01 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
@@ -11,195 +11,178 @@
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: Jeff Hartmann <jhartmann@valinux.com>
- * Keith Whitwell <keithw@valinux.com>
+ * Authors:
+ * Jeff Hartmann <jhartmann@valinux.com>
+ * Keith Whitwell <keithw@valinux.com>
*
+ * Rewritten by:
+ * Gareth Hughes <gareth@valinux.com>
*/
-#ifndef _MGA_DRM_H_
-#define _MGA_DRM_H_
+#ifndef __MGA_DRM_H__
+#define __MGA_DRM_H__
/* WARNING: If you change any of these defines, make sure to change the
- * defines in the Xserver file (xf86drmMga.h)
- */
-#ifndef _MGA_DEFINES_
-#define _MGA_DEFINES_
-
-#define MGA_F 0x1 /* fog */
-#define MGA_A 0x2 /* alpha */
-#define MGA_S 0x4 /* specular */
-#define MGA_T2 0x8 /* multitexture */
-
-#define MGA_WARP_TGZ 0
-#define MGA_WARP_TGZF (MGA_F)
-#define MGA_WARP_TGZA (MGA_A)
-#define MGA_WARP_TGZAF (MGA_F|MGA_A)
-#define MGA_WARP_TGZS (MGA_S)
-#define MGA_WARP_TGZSF (MGA_S|MGA_F)
-#define MGA_WARP_TGZSA (MGA_S|MGA_A)
-#define MGA_WARP_TGZSAF (MGA_S|MGA_F|MGA_A)
-#define MGA_WARP_T2GZ (MGA_T2)
-#define MGA_WARP_T2GZF (MGA_T2|MGA_F)
-#define MGA_WARP_T2GZA (MGA_T2|MGA_A)
-#define MGA_WARP_T2GZAF (MGA_T2|MGA_A|MGA_F)
-#define MGA_WARP_T2GZS (MGA_T2|MGA_S)
-#define MGA_WARP_T2GZSF (MGA_T2|MGA_S|MGA_F)
-#define MGA_WARP_T2GZSA (MGA_T2|MGA_S|MGA_A)
-#define MGA_WARP_T2GZSAF (MGA_T2|MGA_S|MGA_F|MGA_A)
-
-#define MGA_MAX_G400_PIPES 16
-#define MGA_MAX_G200_PIPES 8 /* no multitex */
-#define MGA_MAX_WARP_PIPES MGA_MAX_G400_PIPES
-
-#define MGA_CARD_TYPE_G200 1
-#define MGA_CARD_TYPE_G400 2
-
-#define MGA_FRONT 0x1
-#define MGA_BACK 0x2
-#define MGA_DEPTH 0x4
-
-/* 3d state excluding texture units:
+ * defines in the Xserver file (mga_sarea.h)
*/
-#define MGA_CTXREG_DSTORG 0 /* validated */
-#define MGA_CTXREG_MACCESS 1
-#define MGA_CTXREG_PLNWT 2
-#define MGA_CTXREG_DWGCTL 3
-#define MGA_CTXREG_ALPHACTRL 4
-#define MGA_CTXREG_FOGCOLOR 5
-#define MGA_CTXREG_WFLAG 6
-#define MGA_CTXREG_TDUAL0 7
-#define MGA_CTXREG_TDUAL1 8
-#define MGA_CTXREG_FCOL 9
-#define MGA_CTX_SETUP_SIZE 10
-
-/* 2d state
- */
-#define MGA_2DREG_PITCH 0
-#define MGA_2D_SETUP_SIZE 1
+#ifndef __MGA_SAREA_DEFINES__
+#define __MGA_SAREA_DEFINES__
-/* Each texture unit has a state:
+/* WARP pipe flags
*/
-#define MGA_TEXREG_CTL 0
-#define MGA_TEXREG_CTL2 1
-#define MGA_TEXREG_FILTER 2
-#define MGA_TEXREG_BORDERCOL 3
-#define MGA_TEXREG_ORG 4 /* validated */
-#define MGA_TEXREG_ORG1 5
-#define MGA_TEXREG_ORG2 6
-#define MGA_TEXREG_ORG3 7
-#define MGA_TEXREG_ORG4 8
-#define MGA_TEXREG_WIDTH 9
-#define MGA_TEXREG_HEIGHT 10
-#define MGA_TEX_SETUP_SIZE 11
+#define MGA_F 0x1 /* fog */
+#define MGA_A 0x2 /* alpha */
+#define MGA_S 0x4 /* specular */
+#define MGA_T2 0x8 /* multitexture */
+
+#define MGA_WARP_TGZ 0
+#define MGA_WARP_TGZF (MGA_F)
+#define MGA_WARP_TGZA (MGA_A)
+#define MGA_WARP_TGZAF (MGA_F|MGA_A)
+#define MGA_WARP_TGZS (MGA_S)
+#define MGA_WARP_TGZSF (MGA_S|MGA_F)
+#define MGA_WARP_TGZSA (MGA_S|MGA_A)
+#define MGA_WARP_TGZSAF (MGA_S|MGA_F|MGA_A)
+#define MGA_WARP_T2GZ (MGA_T2)
+#define MGA_WARP_T2GZF (MGA_T2|MGA_F)
+#define MGA_WARP_T2GZA (MGA_T2|MGA_A)
+#define MGA_WARP_T2GZAF (MGA_T2|MGA_A|MGA_F)
+#define MGA_WARP_T2GZS (MGA_T2|MGA_S)
+#define MGA_WARP_T2GZSF (MGA_T2|MGA_S|MGA_F)
+#define MGA_WARP_T2GZSA (MGA_T2|MGA_S|MGA_A)
+#define MGA_WARP_T2GZSAF (MGA_T2|MGA_S|MGA_F|MGA_A)
+
+#define MGA_MAX_G200_PIPES 8 /* no multitex */
+#define MGA_MAX_G400_PIPES 16
+#define MGA_MAX_WARP_PIPES MGA_MAX_G400_PIPES
+#define MGA_WARP_UCODE_SIZE 32768 /* in bytes */
+
+#define MGA_CARD_TYPE_G200 1
+#define MGA_CARD_TYPE_G400 2
+
+
+#define MGA_FRONT 0x1
+#define MGA_BACK 0x2
+#define MGA_DEPTH 0x4
/* What needs to be changed for the current vertex dma buffer?
*/
-#define MGA_UPLOAD_CTX 0x1
-#define MGA_UPLOAD_TEX0 0x2
-#define MGA_UPLOAD_TEX1 0x4
-#define MGA_UPLOAD_PIPE 0x8
-#define MGA_UPLOAD_TEX0IMAGE 0x10 /* handled client-side */
-#define MGA_UPLOAD_TEX1IMAGE 0x20 /* handled client-side */
-#define MGA_UPLOAD_2D 0x40
-#define MGA_WAIT_AGE 0x80 /* handled client-side */
-#define MGA_UPLOAD_CLIPRECTS 0x100 /* handled client-side */
-#define MGA_DMA_FLUSH 0x200 /* set when someone gets the lock
- quiescent */
+#define MGA_UPLOAD_CONTEXT 0x1
+#define MGA_UPLOAD_TEX0 0x2
+#define MGA_UPLOAD_TEX1 0x4
+#define MGA_UPLOAD_PIPE 0x8
+#define MGA_UPLOAD_TEX0IMAGE 0x10 /* handled client-side */
+#define MGA_UPLOAD_TEX1IMAGE 0x20 /* handled client-side */
+#define MGA_UPLOAD_2D 0x40
+#define MGA_WAIT_AGE 0x80 /* handled client-side */
+#define MGA_UPLOAD_CLIPRECTS 0x100 /* handled client-side */
+#if 0
+#define MGA_DMA_FLUSH 0x200 /* set when someone gets the lock
+ quiescent */
+#endif
/* 32 buffers of 64k each, total 2 meg.
*/
-#define MGA_DMA_BUF_ORDER 16
-#define MGA_DMA_BUF_SZ (1<<MGA_DMA_BUF_ORDER)
-#define MGA_DMA_BUF_NR 31
+#define MGA_BUFFER_SIZE (1 << 16)
+#define MGA_NUM_BUFFERS 128
/* Keep these small for testing.
*/
-#define MGA_NR_SAREA_CLIPRECTS 8
+#define MGA_NR_SAREA_CLIPRECTS 8
/* 2 heaps (1 for card, 1 for agp), each divided into upto 128
- * regions, subject to a minimum region size of (1<<16) == 64k.
+ * regions, subject to a minimum region size of (1<<16) == 64k.
*
* Clients may subdivide regions internally, but when sharing between
- * clients, the region size is the minimum granularity.
+ * clients, the region size is the minimum granularity.
*/
-#define MGA_CARD_HEAP 0
-#define MGA_AGP_HEAP 1
-#define MGA_NR_TEX_HEAPS 2
-#define MGA_NR_TEX_REGIONS 16
-#define MGA_LOG_MIN_TEX_REGION_SIZE 16
-#endif
+#define MGA_CARD_HEAP 0
+#define MGA_AGP_HEAP 1
+#define MGA_NR_TEX_HEAPS 2
+#define MGA_NR_TEX_REGIONS 16
+#define MGA_LOG_MIN_TEX_REGION_SIZE 16
-typedef struct _drm_mga_warp_index {
- int installed;
- unsigned long phys_addr;
- int size;
-} drm_mga_warp_index_t;
+#endif /* __MGA_SAREA_DEFINES__ */
-typedef struct drm_mga_init {
- enum {
- MGA_INIT_DMA = 0x01,
- MGA_CLEANUP_DMA = 0x02
- } func;
- int reserved_map_agpstart;
- int reserved_map_idx;
- int buffer_map_idx;
- int sarea_priv_offset;
- int primary_size;
- int warp_ucode_size;
- unsigned int frontOffset;
- unsigned int backOffset;
- unsigned int depthOffset;
- unsigned int textureOffset;
- unsigned int textureSize;
- unsigned int agpTextureOffset;
- unsigned int agpTextureSize;
- unsigned int cpp;
- unsigned int stride;
- int sgram;
- int chipset;
- drm_mga_warp_index_t WarpIndex[MGA_MAX_WARP_PIPES];
- unsigned int mAccess;
-} drm_mga_init_t;
-/* Warning: if you change the sarea structure, you must change the Xserver
- * structures as well */
+/* Setup registers for 3D context
+ */
+typedef struct {
+ unsigned int dstorg;
+ unsigned int maccess;
+ unsigned int plnwt;
+ unsigned int dwgctl;
+ unsigned int alphactrl;
+ unsigned int fogcolor;
+ unsigned int wflag;
+ unsigned int tdualstage0;
+ unsigned int tdualstage1;
+ unsigned int fcol;
+ unsigned int stencil;
+ unsigned int stencilctl;
+} drm_mga_context_regs_t;
+
+/* Setup registers for 2D, X server
+ */
+typedef struct {
+ unsigned int pitch;
+} drm_mga_server_regs_t;
-typedef struct _drm_mga_tex_region {
- unsigned char next, prev;
- unsigned char in_use;
- unsigned int age;
-} drm_mga_tex_region_t;
+/* Setup registers for each texture unit
+ */
+typedef struct {
+ unsigned int texctl;
+ unsigned int texctl2;
+ unsigned int texfilter;
+ unsigned int texbordercol;
+ unsigned int texorg;
+ unsigned int texwidth;
+ unsigned int texheight;
+ unsigned int texorg1;
+ unsigned int texorg2;
+ unsigned int texorg3;
+ unsigned int texorg4;
+} drm_mga_texture_regs_t;
+
+/* General aging mechanism
+ */
+typedef struct {
+ unsigned int head; /* Position of head pointer */
+ unsigned int wrap; /* Primary DMA wrap count */
+} drm_mga_age_t;
typedef struct _drm_mga_sarea {
/* The channel for communication of state information to the kernel
* on firing a vertex dma buffer.
*/
- unsigned int ContextState[MGA_CTX_SETUP_SIZE];
- unsigned int ServerState[MGA_2D_SETUP_SIZE];
- unsigned int TexState[2][MGA_TEX_SETUP_SIZE];
- unsigned int WarpPipe;
+ drm_mga_context_regs_t context_state;
+ drm_mga_server_regs_t server_state;
+ drm_mga_texture_regs_t tex_state[2];
+ unsigned int warp_pipe;
unsigned int dirty;
+ unsigned int vertsize;
- unsigned int nbox;
+ /* The current cliprects, or a subset thereof.
+ */
drm_clip_rect_t boxes[MGA_NR_SAREA_CLIPRECTS];
-
+ unsigned int nbox;
/* Information about the most recently used 3d drawable. The
- * client fills in the req_* fields, the server fills in the
+ * client fills in the req_* fields, the server fills in the
* exported_ fields and puts the cliprects into boxes, above.
*
* The client clears the exported_drawable field before
@@ -208,62 +191,120 @@ typedef struct _drm_mga_sarea {
unsigned int req_drawable; /* the X drawable id */
unsigned int req_draw_buffer; /* MGA_FRONT or MGA_BACK */
- unsigned int exported_drawable;
- unsigned int exported_index;
- unsigned int exported_stamp;
- unsigned int exported_buffers;
+ unsigned int exported_drawable;
+ unsigned int exported_index;
+ unsigned int exported_stamp;
+ unsigned int exported_buffers;
unsigned int exported_nfront;
unsigned int exported_nback;
- int exported_back_x, exported_front_x, exported_w;
+ int exported_back_x, exported_front_x, exported_w;
int exported_back_y, exported_front_y, exported_h;
drm_clip_rect_t exported_boxes[MGA_NR_SAREA_CLIPRECTS];
-
+
/* Counters for aging textures and for client-side throttling.
*/
+ unsigned int status[4];
+ unsigned int last_wrap;
+
+ drm_mga_age_t last_frame;
unsigned int last_enqueue; /* last time a buffer was enqueued */
unsigned int last_dispatch; /* age of the most recently dispatched buffer */
unsigned int last_quiescent; /* */
-
- /* LRU lists for texture memory in agp space and on the card
+ /* LRU lists for texture memory in agp space and on the card.
*/
- drm_mga_tex_region_t texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS+1];
+ drm_tex_region_t texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS+1];
unsigned int texAge[MGA_NR_TEX_HEAPS];
-
+
/* Mechanism to validate card state.
*/
int ctxOwner;
-} drm_mga_sarea_t;
+} drm_mga_sarea_t;
-/* Device specific ioctls:
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the Xserver file (xf86drmMga.h)
*/
-typedef struct _drm_mga_clear {
+typedef struct _drm_mga_warp_index {
+ int installed;
+ unsigned long phys_addr;
+ int size;
+} drm_mga_warp_index_t;
+
+typedef struct drm_mga_init {
+ enum {
+ MGA_INIT_DMA = 0x01,
+ MGA_CLEANUP_DMA = 0x02
+ } func;
+
+ unsigned long sarea_priv_offset;
+
+ int chipset;
+ int sgram;
+
+ unsigned int maccess;
+
+ unsigned int fb_cpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
+
+ unsigned int depth_cpp;
+ unsigned int depth_offset, depth_pitch;
+
+ unsigned int texture_offset[MGA_NR_TEX_HEAPS];
+ unsigned int texture_size[MGA_NR_TEX_HEAPS];
+
+ unsigned long fb_offset;
+ unsigned long mmio_offset;
+ unsigned long status_offset;
+ unsigned long warp_offset;
+ unsigned long primary_offset;
+ unsigned long buffers_offset;
+} drm_mga_init_t;
+
+typedef struct drm_mga_fullscreen {
+ enum {
+ MGA_INIT_FULLSCREEN = 0x01,
+ MGA_CLEANUP_FULLSCREEN = 0x02
+ } func;
+} drm_mga_fullscreen_t;
+
+typedef struct drm_mga_clear {
+ unsigned int flags;
unsigned int clear_color;
unsigned int clear_depth;
- unsigned int flags;
+ unsigned int color_mask;
+ unsigned int depth_mask;
} drm_mga_clear_t;
-typedef struct _drm_mga_swap {
- int dummy;
-} drm_mga_swap_t;
+typedef struct drm_mga_vertex {
+ int idx; /* buffer to queue */
+ int used; /* bytes in use */
+ int discard; /* client finished with buffer? */
+} drm_mga_vertex_t;
+
+typedef struct drm_mga_indices {
+ int idx; /* buffer to queue */
+ unsigned int start;
+ unsigned int end;
+ int discard; /* client finished with buffer? */
+} drm_mga_indices_t;
-typedef struct _drm_mga_iload {
+typedef struct drm_mga_iload {
int idx;
- int length;
- unsigned int destOrg;
+ unsigned int dstorg;
+ unsigned int length;
} drm_mga_iload_t;
-typedef struct _drm_mga_vertex {
- int idx; /* buffer to queue */
- int used; /* bytes in use */
- int discard; /* client finished with buffer? */
-} drm_mga_vertex_t;
-
-typedef struct _drm_mga_indices {
- int idx; /* buffer to queue */
- unsigned int start;
- unsigned int end;
- int discard; /* client finished with buffer? */
-} drm_mga_indices_t;
+typedef struct _drm_mga_blit {
+ unsigned int planemask;
+ unsigned int srcorg;
+ unsigned int dstorg;
+ int src_pitch, dst_pitch;
+ int delta_sx, delta_sy;
+ int delta_dx, delta_dy;
+ int height, ydir; /* flip image vertically */
+ int source_pitch, dest_pitch;
+} drm_mga_blit_t;
#endif
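For reference, a hedged sketch of how a client might fill the reworked clear request defined above. Only fields and flag bits from this header are used; the struct is mirrored locally so the fragment compiles on its own, and the ioctl request number is not part of this diff, so the submit step is only indicated in a comment.

#include <stdio.h>
#include <string.h>

#define MGA_FRONT 0x1
#define MGA_BACK  0x2
#define MGA_DEPTH 0x4

/* Local mirror of drm_mga_clear_t as declared in mga_drm.h above. */
typedef struct drm_mga_clear {
	unsigned int flags;
	unsigned int clear_color;
	unsigned int clear_depth;
	unsigned int color_mask;
	unsigned int depth_mask;
} drm_mga_clear_t;

static void build_clear(drm_mga_clear_t *clear,
			unsigned int color, unsigned int depth)
{
	memset(clear, 0, sizeof(*clear));
	clear->flags       = MGA_FRONT | MGA_BACK | MGA_DEPTH;
	clear->clear_color = color;        /* packed framebuffer color */
	clear->clear_depth = depth;        /* depth-buffer fill value */
	clear->color_mask  = 0xffffffff;   /* written to MGA_PLNWT for color clears */
	clear->depth_mask  = 0xffffffff;   /* written to MGA_PLNWT for the depth clear */
	/* The request would then be passed to the kernel's clear ioctl, which
	 * copies it in and calls mga_dma_dispatch_clear(); the ioctl number
	 * itself is defined elsewhere and not shown in this diff.
	 */
}

int main(void)
{
	drm_mga_clear_t clear;

	build_clear(&clear, 0x00000000, 0xffffffff);
	printf("flags=0x%x color_mask=0x%08x\n", clear.flags, clear.color_mask);
	return 0;
}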
diff --git a/bsd/r128/Makefile b/bsd/r128/Makefile
new file mode 100644
index 00000000..ae5622ed
--- /dev/null
+++ b/bsd/r128/Makefile
@@ -0,0 +1,25 @@
+# $FreeBSD$
+
+KMOD = r128
+NOMAN= YES
+SRCS = r128_cce.c r128_drv.c r128_state.c
+SRCS += device_if.h bus_if.h pci_if.h opt_drm_linux.h
+CFLAGS += ${DEBUG_FLAGS} -I. -I..
+
+@:
+ ln -sf /sys @
+
+machine:
+ ln -sf /sys/i386/include machine
+
+.if ${MACHINE_ARCH} == "i386"
+# This line enables linux ioctl handling
+# If you want support for this uncomment this line
+#R128_OPTS= "\#define DRM_LINUX" 1
+.endif
+
+opt_drm_linux.h:
+ touch opt_drm_linux.h
+ echo $(R128_OPTS) >> opt_drm_linux.h
+
+.include <bsd.kmod.mk>
diff --git a/bsd/r128_drm.h b/bsd/r128_drm.h
index bff103c2..0fc6a6cd 100644
--- a/bsd/r128_drm.h
+++ b/bsd/r128_drm.h
@@ -2,6 +2,7 @@
* Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
*
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -10,11 +11,11 @@
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
@@ -23,89 +24,264 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
- * Authors: Kevin E. Martin <kevin@precisioninsight.com>
- *
- * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/bsd/drm/kernel/r128_drm.h,v 1.1 2000/06/17 00:03:29 martin Exp $
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
+ * Kevin E. Martin <martin@valinux.com>
+ */
+
+#ifndef __R128_DRM_H__
+#define __R128_DRM_H__
+
+/* WARNING: If you change any of these defines, make sure to change the
+ * defines in the X server file (r128_sarea.h)
+ */
+#ifndef __R128_SAREA_DEFINES__
+#define __R128_SAREA_DEFINES__
+
+/* What needs to be changed for the current vertex buffer?
+ */
+#define R128_UPLOAD_CONTEXT 0x001
+#define R128_UPLOAD_SETUP 0x002
+#define R128_UPLOAD_TEX0 0x004
+#define R128_UPLOAD_TEX1 0x008
+#define R128_UPLOAD_TEX0IMAGES 0x010
+#define R128_UPLOAD_TEX1IMAGES 0x020
+#define R128_UPLOAD_CORE 0x040
+#define R128_UPLOAD_MASKS 0x080
+#define R128_UPLOAD_WINDOW 0x100
+#define R128_UPLOAD_CLIPRECTS 0x200 /* handled client-side */
+#define R128_REQUIRE_QUIESCENCE 0x400
+#define R128_UPLOAD_ALL 0x7ff
+
+#define R128_FRONT 0x1
+#define R128_BACK 0x2
+#define R128_DEPTH 0x4
+
+/* Primitive types
+ */
+#define R128_POINTS 0x1
+#define R128_LINES 0x2
+#define R128_LINE_STRIP 0x3
+#define R128_TRIANGLES 0x4
+#define R128_TRIANGLE_FAN 0x5
+#define R128_TRIANGLE_STRIP 0x6
+
+/* Vertex/indirect buffer size
+ */
+#define R128_BUFFER_SIZE 16384
+
+/* Byte offsets for indirect buffer data
+ */
+#define R128_INDEX_PRIM_OFFSET 20
+#define R128_HOSTDATA_BLIT_OFFSET 32
+
+/* Keep these small for testing.
+ */
+#define R128_NR_SAREA_CLIPRECTS 12
+
+/* There are 2 heaps (local/AGP). Each region within a heap is a
+ * minimum of 64k, and there are at most 64 of them per heap.
+ */
+#define R128_LOCAL_TEX_HEAP 0
+#define R128_AGP_TEX_HEAP 1
+#define R128_NR_TEX_HEAPS 2
+#define R128_NR_TEX_REGIONS 64
+#define R128_LOG_TEX_GRANULARITY 16
+
+#define R128_NR_CONTEXT_REGS 12
+
+#define R128_MAX_TEXTURE_LEVELS 11
+#define R128_MAX_TEXTURE_UNITS 2
+
+#endif /* __R128_SAREA_DEFINES__ */
+
+typedef struct {
+ /* Context state - can be written in one large chunk */
+ unsigned int dst_pitch_offset_c;
+ unsigned int dp_gui_master_cntl_c;
+ unsigned int sc_top_left_c;
+ unsigned int sc_bottom_right_c;
+ unsigned int z_offset_c;
+ unsigned int z_pitch_c;
+ unsigned int z_sten_cntl_c;
+ unsigned int tex_cntl_c;
+ unsigned int misc_3d_state_cntl_reg;
+ unsigned int texture_clr_cmp_clr_c;
+ unsigned int texture_clr_cmp_msk_c;
+ unsigned int fog_color_c;
+
+ /* Texture state */
+ unsigned int tex_size_pitch_c;
+ unsigned int constant_color_c;
+
+ /* Setup state */
+ unsigned int pm4_vc_fpu_setup;
+ unsigned int setup_cntl;
+
+ /* Mask state */
+ unsigned int dp_write_mask;
+ unsigned int sten_ref_mask_c;
+ unsigned int plane_3d_mask_c;
+
+ /* Window state */
+ unsigned int window_xy_offset;
+
+ /* Core state */
+ unsigned int scale_3d_cntl;
+} drm_r128_context_regs_t;
+
+/* Setup registers for each texture unit
*/
+typedef struct {
+ unsigned int tex_cntl;
+ unsigned int tex_combine_cntl;
+ unsigned int tex_size_pitch;
+ unsigned int tex_offset[R128_MAX_TEXTURE_LEVELS];
+ unsigned int tex_border_color;
+} drm_r128_texture_regs_t;
+
+
+typedef struct drm_r128_sarea {
+ /* The channel for communication of state information to the kernel
+ * on firing a vertex buffer.
+ */
+ drm_r128_context_regs_t context_state;
+ drm_r128_texture_regs_t tex_state[R128_MAX_TEXTURE_UNITS];
+ unsigned int dirty;
+ unsigned int vertsize;
+ unsigned int vc_format;
+
+ /* The current cliprects, or a subset thereof.
+ */
+ drm_clip_rect_t boxes[R128_NR_SAREA_CLIPRECTS];
+ unsigned int nbox;
+
+ /* Counters for client-side throttling of rendering clients.
+ */
+ unsigned int last_frame;
+ unsigned int last_dispatch;
+
+ drm_tex_region_t tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS+1];
+ int tex_age[R128_NR_TEX_HEAPS];
+ int ctx_owner;
+} drm_r128_sarea_t;
-#ifndef _R128_DRM_H_
-#define _R128_DRM_H_
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (xf86drmR128.h)
*/
typedef struct drm_r128_init {
- enum {
+ enum {
R128_INIT_CCE = 0x01,
R128_CLEANUP_CCE = 0x02
} func;
+#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
int sarea_priv_offset;
+#else
+ unsigned long sarea_priv_offset;
+#endif
int is_pci;
int cce_mode;
- int cce_fifo_size;
int cce_secure;
int ring_size;
int usec_timeout;
- int fb_offset;
- int agp_ring_offset;
- int agp_read_ptr_offset;
- int agp_vertbufs_offset;
- int agp_indbufs_offset;
- int agp_textures_offset;
- int mmio_offset;
+ unsigned int fb_bpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
+ unsigned int depth_bpp;
+ unsigned int depth_offset, depth_pitch;
+ unsigned int span_offset;
+
+#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
+ unsigned int fb_offset;
+ unsigned int mmio_offset;
+ unsigned int ring_offset;
+ unsigned int ring_rptr_offset;
+ unsigned int buffers_offset;
+ unsigned int agp_textures_offset;
+#else
+ unsigned long fb_offset;
+ unsigned long mmio_offset;
+ unsigned long ring_offset;
+ unsigned long ring_rptr_offset;
+ unsigned long buffers_offset;
+ unsigned long agp_textures_offset;
+#endif
} drm_r128_init_t;
-typedef struct drm_r128_packet {
- unsigned long *buffer;
- int count;
- int flags;
-} drm_r128_packet_t;
-
-typedef enum drm_r128_prim {
- _DRM_R128_PRIM_NONE = 0x0001,
- _DRM_R128_PRIM_POINT = 0x0002,
- _DRM_R128_PRIM_LINE = 0x0004,
- _DRM_R128_PRIM_POLY_LINE = 0x0008,
- _DRM_R128_PRIM_TRI_LIST = 0x0010,
- _DRM_R128_PRIM_TRI_FAN = 0x0020,
- _DRM_R128_PRIM_TRI_STRIP = 0x0040,
- _DRM_R128_PRIM_TRI_TYPE2 = 0x0080
-} drm_r128_prim_t;
+typedef struct drm_r128_cce_stop {
+ int flush;
+ int idle;
+} drm_r128_cce_stop_t;
+
+typedef struct drm_r128_clear {
+ unsigned int flags;
+#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
+ int x, y, w, h;
+#endif
+ unsigned int clear_color;
+ unsigned int clear_depth;
+#if CONFIG_XFREE86_VERSION >= XFREE86_VERSION(4,1,0,0)
+ unsigned int color_mask;
+ unsigned int depth_mask;
+#endif
+} drm_r128_clear_t;
typedef struct drm_r128_vertex {
- /* Indices here refer to the offset into
- buflist in drm_buf_get_t. */
- int send_count; /* Number of buffers to send */
- int *send_indices; /* List of handles to buffers */
- int *send_sizes; /* Lengths of data to send */
- drm_r128_prim_t prim; /* Primitive type */
- int request_count; /* Number of buffers requested */
- int *request_indices; /* Buffer information */
- int *request_sizes;
- int granted_count; /* Number of buffers granted */
+ int prim;
+ int idx; /* Index of vertex buffer */
+ int count; /* Number of vertices in buffer */
+ int discard; /* Client finished with buffer? */
} drm_r128_vertex_t;
-/* WARNING: If you change any of these defines, make sure to change the
- * defines in the Xserver file (r128_sarea.h)
- */
-#define R128_LOCAL_TEX_HEAP 0
-#define R128_AGP_TEX_HEAP 1
-#define R128_NR_TEX_HEAPS 2
-#define R128_NR_TEX_REGIONS 64
-#define R128_LOG_TEX_GRANULARITY 16
-
-typedef struct drm_tex_region {
- unsigned char next, prev;
- unsigned char in_use;
- int age;
-} drm_tex_region_t;
+typedef struct drm_r128_indices {
+ int prim;
+ int idx;
+ int start;
+ int end;
+ int discard; /* Client finished with buffer? */
+} drm_r128_indices_t;
-typedef struct drm_r128_sarea {
- drm_tex_region_t tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS+1];
- int tex_age[R128_NR_TEX_HEAPS];
- int ctx_owner;
- int ring_write;
-} drm_r128_sarea_t;
+typedef struct drm_r128_blit {
+ int idx;
+ int pitch;
+ int offset;
+ int format;
+ unsigned short x, y;
+ unsigned short width, height;
+} drm_r128_blit_t;
+
+typedef struct drm_r128_depth {
+ enum {
+ R128_WRITE_SPAN = 0x01,
+ R128_WRITE_PIXELS = 0x02,
+ R128_READ_SPAN = 0x03,
+ R128_READ_PIXELS = 0x04
+ } func;
+ int n;
+ int *x;
+ int *y;
+ unsigned int *buffer;
+ unsigned char *mask;
+} drm_r128_depth_t;
+
+typedef struct drm_r128_stipple {
+ unsigned int *mask;
+} drm_r128_stipple_t;
+
+typedef struct drm_r128_indirect {
+ int idx;
+ int start;
+ int end;
+ int discard;
+} drm_r128_indirect_t;
+
+typedef struct drm_r128_fullscreen {
+ enum {
+ R128_INIT_FULLSCREEN = 0x01,
+ R128_CLEANUP_FULLSCREEN = 0x02
+ } func;
+} drm_r128_fullscreen_t;
#endif
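The LRU texture-heap constants above imply a region granularity of (1 << R128_LOG_TEX_GRANULARITY) = 64 KiB, with at most R128_NR_TEX_REGIONS regions per heap. A small sketch of the usual region-size computation follows; the exact formula lives in the client-side texture manager rather than in this diff, so treat it as an assumption, not the driver's code.

#include <stdio.h>

#define R128_NR_TEX_HEAPS        2
#define R128_NR_TEX_REGIONS      64
#define R128_LOG_TEX_GRANULARITY 16

/* Divide a heap into at most R128_NR_TEX_REGIONS regions, but never make
 * a region smaller than the 64 KiB granularity advertised above.
 */
static unsigned long r128_region_size(unsigned long heap_size)
{
	unsigned long size = heap_size / R128_NR_TEX_REGIONS;
	unsigned long min_size = 1UL << R128_LOG_TEX_GRANULARITY;

	return (size > min_size) ? size : min_size;
}

int main(void)
{
	/* e.g. an 8 MiB local heap and a 4 MiB AGP heap */
	unsigned long heaps[R128_NR_TEX_HEAPS] = { 8UL << 20, 4UL << 20 };
	int i;

	for (i = 0; i < R128_NR_TEX_HEAPS; i++)
		printf("heap %d: region size %lu bytes\n",
		       i, r128_region_size(heaps[i]));
	return 0;
}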
diff --git a/bsd/radeon/Makefile b/bsd/radeon/Makefile
new file mode 100644
index 00000000..b1d77bf4
--- /dev/null
+++ b/bsd/radeon/Makefile
@@ -0,0 +1,25 @@
+# $FreeBSD$
+
+KMOD = radeon
+NOMAN= YES
+SRCS = radeon_cp.c radeon_drv.c radeon_state.c
+SRCS += device_if.h bus_if.h pci_if.h opt_drm_linux.h
+CFLAGS += ${DEBUG_FLAGS} -I. -I..
+
+@:
+ ln -sf /sys @
+
+machine:
+ ln -sf /sys/i386/include machine
+
+.if ${MACHINE_ARCH} == "i386"
+# This line enables linux ioctl handling
+# If you want support for this uncomment this line
+#RADEON_OPTS= "\#define DRM_LINUX" 1
+.endif
+
+opt_drm_linux.h:
+ touch opt_drm_linux.h
+ echo $(RADEON_OPTS) >> opt_drm_linux.h
+
+.include <bsd.kmod.mk>
diff --git a/bsd/tdfx/Makefile b/bsd/tdfx/Makefile
index 4362a5ba..d177ff60 100644
--- a/bsd/tdfx/Makefile
+++ b/bsd/tdfx/Makefile
@@ -1,10 +1,10 @@
# $FreeBSD$
-KMOD = tdfx
-SRCS = tdfx_drv.c tdfx_context.c
-SRCS += device_if.h bus_if.h pci_if.h
-CFLAGS += ${DEBUG_FLAGS} -I. -I..
-KMODDEPS = drm
+KMOD= tdfx
+NOMAN= YES
+SRCS= tdfx_drv.c
+SRCS+= device_if.h bus_if.h pci_if.h opt_drm_linux.h
+CFLAGS+= ${DEBUG_FLAGS} -I. -I..
@:
ln -sf /sys @
@@ -12,4 +12,14 @@ KMODDEPS = drm
machine:
ln -sf /sys/i386/include machine
+.if ${MACHINE_ARCH} == "i386"
+# This line enables linux ioctl handling
+# If you want support for this uncomment this line
+#TDFX_OPTS= "\#define DRM_LINUX" 1
+.endif
+
+opt_drm_linux.h:
+ touch opt_drm_linux.h
+ echo $(TDFX_OPTS) >> opt_drm_linux.h
+
.include <bsd.kmod.mk>
diff --git a/bsd/tdfx/tdfx_drv.c b/bsd/tdfx/tdfx_drv.c
index 202df7c3..29d19558 100644
--- a/bsd/tdfx/tdfx_drv.c
+++ b/bsd/tdfx/tdfx_drv.c
@@ -1,4 +1,4 @@
-/* tdfx.c -- tdfx driver -*- c -*-
+/* tdfx_drv.c -- tdfx driver -*- linux-c -*-
* Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
@@ -11,11 +11,11 @@
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
@@ -23,719 +23,78 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
- *
+ *
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Daryll Strauss <daryll@valinux.com>
- *
+ * Gareth Hughes <gareth@valinux.com>
*/
-#include "drmP.h"
-#include "tdfx_drv.h"
+#include <sys/types.h>
+#include <sys/bus.h>
#include <pci/pcivar.h>
+#include <opt_drm_linux.h>
-MODULE_DEPEND(tdfx, drm, 1, 1, 1);
-#ifdef DRM_AGP
-MODULE_DEPEND(tdfx, agp, 1, 1, 1);
-#endif
-
-#define TDFX_NAME "tdfx"
-#define TDFX_DESC "tdfx"
-#define TDFX_DATE "20000928"
-#define TDFX_MAJOR 1
-#define TDFX_MINOR 0
-#define TDFX_PATCHLEVEL 0
-
-static int tdfx_init(device_t nbdev);
-static void tdfx_cleanup(device_t nbdev);
-
-drm_ctx_t tdfx_res_ctx;
-
-static int tdfx_probe(device_t dev)
-{
- const char *s = 0;
-
- switch (pci_get_devid(dev)) {
- case 0x0003121a:
- s = "3Dfx Voodoo Banshee graphics accelerator";
- break;
-
- case 0x0005121a:
- s = "3Dfx Voodoo 3 graphics accelerator";
- break;
- case 0x0009121a:
- s = "3Dfx Voodoo 5 graphics accelerator";
- break;
- }
-
- if (s) {
- device_set_desc(dev, s);
- return 0;
- }
-
- return ENXIO;
-}
-
-static int tdfx_attach(device_t dev)
-{
- tdfx_init(dev);
- return 0;
-}
-
-static int tdfx_detach(device_t dev)
-{
- tdfx_cleanup(dev);
- return 0;
-}
-
-static device_method_t tdfx_methods[] = {
- /* Device interface */
- DEVMETHOD(device_probe, tdfx_probe),
- DEVMETHOD(device_attach, tdfx_attach),
- DEVMETHOD(device_detach, tdfx_detach),
-
- { 0, 0 }
-};
-
-static driver_t tdfx_driver = {
- "drm",
- tdfx_methods,
- sizeof(drm_device_t),
-};
-
-static devclass_t tdfx_devclass;
-#define TDFX_SOFTC(unit) \
- ((drm_device_t *) devclass_get_softc(tdfx_devclass, unit))
-
-DRIVER_MODULE(if_tdfx, pci, tdfx_driver, tdfx_devclass, 0, 0);
-
-#define CDEV_MAJOR 145
- /* tdfx_drv.c */
-static d_open_t tdfx_open;
-static d_close_t tdfx_close;
-static d_ioctl_t tdfx_version;
-static d_ioctl_t tdfx_ioctl;
-static d_ioctl_t tdfx_lock;
-static d_ioctl_t tdfx_unlock;
-
-static struct cdevsw tdfx_cdevsw = {
- /* open */ tdfx_open,
- /* close */ tdfx_close,
- /* read */ drm_read,
- /* write */ drm_write,
- /* ioctl */ tdfx_ioctl,
- /* poll */ drm_poll,
- /* mmap */ drm_mmap,
- /* strategy */ nostrategy,
- /* name */ "tdfx",
- /* maj */ CDEV_MAJOR,
- /* dump */ nodump,
- /* psize */ nopsize,
- /* flags */ D_TTY | D_TRACKCLOSE,
- /* bmaj */ -1
-};
-
-static drm_ioctl_desc_t tdfx_ioctls[] = {
- [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { tdfx_version, 0, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
-
- [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
-
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { tdfx_addctx, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { tdfx_rmctx, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { tdfx_modctx, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { tdfx_getctx, 1, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { tdfx_switchctx, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { tdfx_newctx, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { tdfx_resctx, 1, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
- [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { tdfx_lock, 1, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { tdfx_unlock, 1, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
-#ifdef DRM_AGP
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_unbind, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_bind, 1, 1},
-#endif
-};
-#define TDFX_IOCTL_COUNT DRM_ARRAY_SIZE(tdfx_ioctls)
-
-static int
-tdfx_setup(drm_device_t *dev)
-{
- int i;
-
- device_busy(dev->device);
-
- atomic_set(&dev->ioctl_count, 0);
- atomic_set(&dev->vma_count, 0);
- dev->buf_use = 0;
- atomic_set(&dev->buf_alloc, 0);
-
- atomic_set(&dev->total_open, 0);
- atomic_set(&dev->total_close, 0);
- atomic_set(&dev->total_ioctl, 0);
- atomic_set(&dev->total_irq, 0);
- atomic_set(&dev->total_ctx, 0);
- atomic_set(&dev->total_locks, 0);
- atomic_set(&dev->total_unlocks, 0);
- atomic_set(&dev->total_contends, 0);
- atomic_set(&dev->total_sleeps, 0);
-
- for (i = 0; i < DRM_HASH_SIZE; i++) {
- dev->magiclist[i].head = NULL;
- dev->magiclist[i].tail = NULL;
- }
- dev->maplist = NULL;
- dev->map_count = 0;
- dev->vmalist = NULL;
- dev->lock.hw_lock = NULL;
- dev->lock.lock_queue = 0;
- dev->queue_count = 0;
- dev->queue_reserved = 0;
- dev->queue_slots = 0;
- dev->queuelist = NULL;
- dev->irq = 0;
- dev->context_flag = 0;
- dev->interrupt_flag = 0;
- dev->dma = 0;
- dev->dma_flag = 0;
- dev->last_context = 0;
- dev->last_switch = 0;
- dev->last_checked = 0;
- callout_init(&dev->timer);
- dev->context_wait = 0;
-
- timespecclear(&dev->ctx_start);
- timespecclear(&dev->lck_start);
-
- dev->buf_rp = dev->buf;
- dev->buf_wp = dev->buf;
- dev->buf_end = dev->buf + DRM_BSZ;
- bzero(&dev->buf_sel, sizeof dev->buf_sel);
- dev->buf_sigio = NULL;
- dev->buf_readers = 0;
- dev->buf_writers = 0;
- dev->buf_selecting = 0;
-
- tdfx_res_ctx.handle=-1;
-
- DRM_DEBUG("\n");
-
- /* The kernel's context could be created here, but is now created
- in drm_dma_enqueue. This is more resource-efficient for
- hardware that does not do DMA, but may mean that
- drm_select_queue fails between the time the interrupt is
- initialized and the time the queues are initialized. */
-
- return 0;
-}
-
-
-static int
-tdfx_takedown(drm_device_t *dev)
-{
- int i;
- drm_magic_entry_t *pt, *next;
- drm_map_t *map;
- drm_vma_entry_t *vma, *vma_next;
-
- DRM_DEBUG("\n");
-
- lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
- callout_stop(&dev->timer);
-
- if (dev->devname) {
- drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
- dev->devname = NULL;
- }
-
- if (dev->unique) {
- drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
- dev->unique = NULL;
- dev->unique_len = 0;
- }
- /* Clear pid list */
- for (i = 0; i < DRM_HASH_SIZE; i++) {
- for (pt = dev->magiclist[i].head; pt; pt = next) {
- next = pt->next;
- drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
- }
- dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
- }
-#ifdef DRM_AGP
- /* Clear AGP information */
- if (dev->agp) {
- drm_agp_mem_t *temp;
- drm_agp_mem_t *temp_next;
-
- temp = dev->agp->memory;
- while(temp != NULL) {
- temp_next = temp->next;
- drm_free_agp(temp->handle, temp->pages);
- drm_free(temp, sizeof(*temp), DRM_MEM_AGPLISTS);
- temp = temp_next;
- }
-
- if (dev->agp->acquired)
- agp_release(dev->agp->agpdev);
- drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
- dev->agp = NULL;
- }
-#endif
-
- /* Clear vma list (only built for debugging) */
- if (dev->vmalist) {
- for (vma = dev->vmalist; vma; vma = vma_next) {
- vma_next = vma->next;
- drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
- }
- dev->vmalist = NULL;
- }
-
- /* Clear map area and mtrr information */
- if (dev->maplist) {
- for (i = 0; i < dev->map_count; i++) {
- map = dev->maplist[i];
- switch (map->type) {
- case _DRM_REGISTERS:
- case _DRM_FRAME_BUFFER:
-#ifdef CONFIG_MTRR
- if (map->mtrr >= 0) {
- int retcode;
- retcode = mtrr_del(map->mtrr,
- map->offset,
- map->size);
- DRM_DEBUG("mtrr_del = %d\n", retcode);
- }
-#endif
- drm_ioremapfree(map->handle, map->size);
- break;
- case _DRM_SHM:
- drm_free_pages((unsigned long)map->handle,
- drm_order(map->size)
- - PAGE_SHIFT,
- DRM_MEM_SAREA);
- break;
- case _DRM_AGP:
- break; /* XXX */
- }
- drm_free(map, sizeof(*map), DRM_MEM_MAPS);
- }
- drm_free(dev->maplist,
- dev->map_count * sizeof(*dev->maplist),
- DRM_MEM_MAPS);
- dev->maplist = NULL;
- dev->map_count = 0;
- }
-
- if (dev->lock.hw_lock) {
- dev->lock.hw_lock = NULL; /* SHM removed */
- dev->lock.pid = 0;
- wakeup(&dev->lock.lock_queue);
- }
- lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
-
- return 0;
-}
-
-/* tdfx_init is called via tdfx_attach at module load time, */
-
-static int
-tdfx_init(device_t nbdev)
-{
- drm_device_t *dev = device_get_softc(nbdev);
- int retcode;
-
- DRM_DEBUG("\n");
-
- memset((void *)dev, 0, sizeof(*dev));
- simple_lock_init(&dev->count_lock);
- lockinit(&dev->dev_lock, PZERO, "drmlk", 0, 0);
-
-#if 0
- drm_parse_options(tdfx);
-#endif
-
- dev->device = nbdev;
- dev->devnode = make_dev(&tdfx_cdevsw,
- device_get_unit(nbdev),
- DRM_DEV_UID,
- DRM_DEV_GID,
- DRM_DEV_MODE,
- TDFX_NAME);
- dev->name = TDFX_NAME;
-
- drm_mem_init();
- drm_sysctl_init(dev);
- TAILQ_INIT(&dev->files);
-
-#ifdef DRM_AGP
- dev->agp = drm_agp_init();
-#endif
- if((retcode = drm_ctxbitmap_init(dev))) {
- DRM_ERROR("Cannot allocate memory for context bitmap.\n");
- drm_sysctl_cleanup(dev);
- tdfx_takedown(dev);
- return retcode;
- }
-
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
- TDFX_NAME,
- TDFX_MAJOR,
- TDFX_MINOR,
- TDFX_PATCHLEVEL,
- TDFX_DATE,
- device_get_unit(nbdev));
-
- return 0;
-}
-
-/* tdfx_cleanup is called via tdfx_detach at module unload time. */
-
-static void
-tdfx_cleanup(device_t nbdev)
-{
- drm_device_t *dev = device_get_softc(nbdev);
-
- DRM_DEBUG("\n");
-
- drm_sysctl_cleanup(dev);
- destroy_dev(dev->devnode);
-
- DRM_INFO("Module unloaded\n");
-
- drm_ctxbitmap_cleanup(dev);
- tdfx_takedown(dev);
-}
-
-static int
-tdfx_version(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
-{
- drm_version_t version;
- int len;
-
- version = *(drm_version_t *) data;
-
-#define DRM_COPY(name,value) \
- len = strlen(value); \
- if (len > name##_len) len = name##_len; \
- name##_len = strlen(value); \
- if (len && name) { \
- int error = copyout(value, name, len); \
- if (error) return error; \
- }
-
- version.version_major = TDFX_MAJOR;
- version.version_minor = TDFX_MINOR;
- version.version_patchlevel = TDFX_PATCHLEVEL;
-
- DRM_COPY(version.name, TDFX_NAME);
- DRM_COPY(version.date, TDFX_DATE);
- DRM_COPY(version.desc, TDFX_DESC);
-
- *(drm_version_t *) data = version;
- return 0;
-}
-
-static int
-tdfx_open(dev_t kdev, int flags, int fmt, struct proc *p)
-{
- drm_device_t *dev = TDFX_SOFTC(minor(kdev));
- int retcode = 0;
-
- DRM_DEBUG("open_count = %d\n", dev->open_count);
-
- device_busy(dev->device);
- if (!(retcode = drm_open_helper(kdev, flags, fmt, p, dev))) {
- atomic_inc(&dev->total_open);
- simple_lock(&dev->count_lock);
- if (!dev->open_count++) {
- simple_unlock(&dev->count_lock);
- retcode = tdfx_setup(dev);
- }
- simple_unlock(&dev->count_lock);
- }
- device_unbusy(dev->device);
-
- return retcode;
-}
-
-static int
-tdfx_close(dev_t kdev, int flags, int fmt, struct proc *p)
-{
- drm_device_t *dev = kdev->si_drv1;
- int retcode = 0;
-
- DRM_DEBUG("open_count = %d\n", dev->open_count);
- if (!(retcode = drm_close(kdev, flags, fmt, p))) {
- atomic_inc(&dev->total_close);
- simple_lock(&dev->count_lock);
- if (!--dev->open_count) {
- if (atomic_read(&dev->ioctl_count) || dev->blocked) {
- DRM_ERROR("Device busy: %d %d\n",
- atomic_read(&dev->ioctl_count),
- dev->blocked);
- simple_unlock(&dev->count_lock);
- return EBUSY;
- }
- simple_unlock(&dev->count_lock);
- device_unbusy(dev->device);
- return tdfx_takedown(dev);
- }
- simple_unlock(&dev->count_lock);
- }
-
- return retcode;
-}
-
-/* tdfx_ioctl is called whenever a process performs an ioctl on /dev/drm. */
-
-static int
-tdfx_ioctl(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
-{
- int nr = DRM_IOCTL_NR(cmd);
- drm_device_t *dev = kdev->si_drv1;
- drm_file_t *priv;
- int retcode = 0;
- drm_ioctl_desc_t *ioctl;
- d_ioctl_t *func;
-
- DRM_DEBUG("dev=%p\n", dev);
- priv = drm_find_file_by_proc(dev, p);
- if (!priv) {
- DRM_DEBUG("can't find authenticator\n");
- return EINVAL;
- }
-
- atomic_inc(&dev->ioctl_count);
- atomic_inc(&dev->total_ioctl);
- ++priv->ioctl_count;
-
- DRM_DEBUG("pid = %d, cmd = 0x%02lx, nr = 0x%02x, auth = %d\n",
- p->p_pid, cmd, nr, priv->authenticated);
-
- switch (cmd) {
- case FIONBIO:
- atomic_dec(&dev->ioctl_count);
- return 0;
-
- case FIOASYNC:
- atomic_dec(&dev->ioctl_count);
- dev->flags |= FASYNC;
- return 0;
-
- case FIOSETOWN:
- atomic_dec(&dev->ioctl_count);
- return fsetown(*(int *)data, &dev->buf_sigio);
-
- case FIOGETOWN:
- atomic_dec(&dev->ioctl_count);
- *(int *) data = fgetown(dev->buf_sigio);
- return 0;
- }
-
- if (nr >= TDFX_IOCTL_COUNT) {
- retcode = EINVAL;
- } else {
- ioctl = &tdfx_ioctls[nr];
- func = ioctl->func;
-
- if (!func) {
- DRM_DEBUG("no function\n");
- retcode = EINVAL;
- } else if ((ioctl->root_only && suser(p))
- || (ioctl->auth_needed && !priv->authenticated)) {
- retcode = EACCES;
- } else {
- retcode = (func)(kdev, cmd, data, flags, p);
- }
- }
-
- atomic_dec(&dev->ioctl_count);
- return retcode;
-}
-
-static int
-tdfx_lock(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
-{
- drm_device_t *dev = kdev->si_drv1;
- int ret = 0;
- drm_lock_t lock;
-#if DRM_DMA_HISTOGRAM
-
- getnanotime(&dev->lck_start);
-#endif
+#include "tdfx.h"
+#include "drmP.h"
- lock = *(drm_lock_t *) data;
+#define DRIVER_AUTHOR "VA Linux Systems Inc."
- if (lock.context == DRM_KERNEL_CONTEXT) {
- DRM_ERROR("Process %d using kernel context %d\n",
- p->p_pid, lock.context);
- return EINVAL;
- }
+#define DRIVER_NAME "tdfx"
+#define DRIVER_DESC "3dfx Banshee/Voodoo3+"
+#define DRIVER_DATE "20010216"
- DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
- lock.context, p->p_pid, dev->lock.hw_lock->lock,
- lock.flags);
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
-#if 0
- /* dev->queue_count == 0 right now for
- tdfx. FIXME? */
- if (lock.context < 0 || lock.context >= dev->queue_count)
- return EINVAL;
+#ifndef PCI_VENDOR_ID_3DFX
+#define PCI_VENDOR_ID_3DFX 0x121A
#endif
-
- if (!ret) {
-#if 0
- if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
- != lock.context) {
- long j = ticks - dev->lock.lock_time;
-
- if (lock.context == tdfx_res_ctx.handle &&
- j >= 0 && j < DRM_LOCK_SLICE) {
- /* Can't take lock if we just had it and
- there is contention. */
- DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d ticks=%d\n",
- lock.context, p->p_pid, j,
- dev->lock.lock_time, ticks);
- ret = tsleep(&never, PZERO|PCATCH, "drmlk1",
- DRM_LOCK_SLICE - j);
- if (ret)
- return ret;
- DRM_DEBUG("ticks=%d\n", ticks);
- }
- }
+#ifndef PCI_DEVICE_ID_3DFX_VOODOO5
+#define PCI_DEVICE_ID_3DFX_VOODOO5 0x0009
#endif
- for (;;) {
- if (!dev->lock.hw_lock) {
- /* Device has been unregistered */
- ret = EINTR;
- break;
- }
- if (drm_lock_take(&dev->lock.hw_lock->lock,
- lock.context)) {
- dev->lock.pid = p->p_pid;
- dev->lock.lock_time = ticks;
- atomic_inc(&dev->total_locks);
- break; /* Got lock */
- }
-
- /* Contention */
- atomic_inc(&dev->total_sleeps);
- ret = tsleep(&dev->lock.lock_queue,
- PZERO|PCATCH,
- "drmlk2",
- 0);
- if (ret)
- break;
- }
- }
-
-#if 0
- if (!ret && dev->last_context != lock.context &&
- lock.context != tdfx_res_ctx.handle &&
- dev->last_context != tdfx_res_ctx.handle) {
- add_wait_queue(&dev->context_wait, &entry);
- current->state = TASK_INTERRUPTIBLE;
- /* PRE: dev->last_context != lock.context */
- tdfx_context_switch(dev, dev->last_context, lock.context);
- /* POST: we will wait for the context
- switch and will dispatch on a later call
- when dev->last_context == lock.context
- NOTE WE HOLD THE LOCK THROUGHOUT THIS
- TIME! */
- current->policy |= SCHED_YIELD;
- schedule();
- current->state = TASK_RUNNING;
- remove_wait_queue(&dev->context_wait, &entry);
- if (signal_pending(current)) {
- ret = EINTR;
- } else if (dev->last_context != lock.context) {
- DRM_ERROR("Context mismatch: %d %d\n",
- dev->last_context, lock.context);
- }
- }
+#ifndef PCI_DEVICE_ID_3DFX_VOODOO4
+#define PCI_DEVICE_ID_3DFX_VOODOO4 0x0007
#endif
-
- if (!ret) {
- if (lock.flags & _DRM_LOCK_READY) {
- /* Wait for space in DMA/FIFO */
- }
- if (lock.flags & _DRM_LOCK_QUIESCENT) {
- /* Make hardware quiescent */
-#if 0
- tdfx_quiescent(dev);
+#ifndef PCI_DEVICE_ID_3DFX_VOODOO3_3000 /* Voodoo3 3000 */
+#define PCI_DEVICE_ID_3DFX_VOODOO3_3000 0x0005
#endif
- }
- }
-
-#if 0
- DRM_ERROR("pid = %5d, old counter = %5ld\n",
- p->p_pid, current->counter);
+#ifndef PCI_DEVICE_ID_3DFX_VOODOO3_2000 /* Voodoo3 2000 */
+#define PCI_DEVICE_ID_3DFX_VOODOO3_2000 0x0004
#endif
-#if 0
- while (current->counter > 25)
- current->counter >>= 1; /* decrease time slice */
- DRM_ERROR("pid = %5d, new counter = %5ld\n",
- p->p_pid, current->counter);
+#ifndef PCI_DEVICE_ID_3DFX_BANSHEE
+#define PCI_DEVICE_ID_3DFX_BANSHEE 0x0003
#endif
- DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
-#if DRM_DMA_HISTOGRAM
- {
- struct timespec ts;
- getnanotime(&ts);
- timespecsub(&ts, &dev->lck_start);
- atomic_inc(&dev->histo.lhld[drm_histogram_slot(&ts)]);
- }
-#endif
-
- return ret;
-}
+/* List acquired from http://www.yourvote.com/pci/pcihdr.h and xc/xc/programs/Xserver/hw/xfree86/common/xf86PciInfo.h
+ * Please report inaccuracies to anholt@teleport.com, or report chips that work but are marked unsupported here.
+ */
+drm_chipinfo_t DRM(devicelist)[] = {
+ {0x121a, 0x0003, 1, "3dfx Voodoo Banshee"},
+ {0x121a, 0x0004, 1, "3dfx Voodoo3 2000"},
+ {0x121a, 0x0005, 1, "3dfx Voodoo3 3000"},
+ {0x121a, 0x0007, 1, "3dfx Voodoo4"},
+ {0x121a, 0x0009, 1, "3dfx Voodoo5"},
+ {0, 0, 0, NULL}
+};
-static int
-tdfx_unlock(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
-{
- drm_device_t *dev = kdev->si_drv1;
- drm_lock_t lock;
+#include "drm_auth.h"
+#include "drm_bufs.h"
+#include "drm_context.h"
+#include "drm_dma.h"
+#include "drm_drawable.h"
+#include "drm_drv.h"
- lock = *(drm_lock_t *) data;
-
- if (lock.context == DRM_KERNEL_CONTEXT) {
- DRM_ERROR("Process %d using kernel context %d\n",
- p->p_pid, lock.context);
- return EINVAL;
- }
- DRM_DEBUG("%d frees lock (%d holds)\n",
- lock.context,
- _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
- atomic_inc(&dev->total_unlocks);
- if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
- atomic_inc(&dev->total_contends);
- drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
- /* FIXME: Try to send data to card here */
- if (!dev->context_flag) {
- if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
- DRM_KERNEL_CONTEXT)) {
- DRM_ERROR("\n");
- }
- }
+#include "drm_fops.h"
+#include "drm_init.h"
+#include "drm_ioctl.h"
+#include "drm_lock.h"
+#include "drm_memory.h"
+#include "drm_vm.h"
+#include "drm_sysctl.h"
- return 0;
-}
+DRIVER_MODULE(tdfx, pci, tdfx_driver, tdfx_devclass, 0, 0);
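The rewritten driver is now a thin instantiation of the shared DRM template: the DRM() macro prefixes every symbol with the driver name, so DRM(devicelist) above defines tdfx_devicelist and the included drm_*.h files expand into tdfx_open, tdfx_ioctl, and so on. A sketch of the per-driver header pulled in by the #include "tdfx.h" line (the file itself is outside this hunk, so treat the exact contents as an assumption):

    #ifndef __TDFX_H__
    #define __TDFX_H__

    /* Name-mangling macro the shared drm_*.h templates rely on:
     * DRM(foo) becomes tdfx_foo for this driver.
     */
    #define DRM(x) tdfx_##x

    #endif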
diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c
index 4916e032..1d26b6e6 100644
--- a/libdrm/xf86drm.c
+++ b/libdrm/xf86drm.c
@@ -27,7 +27,7 @@
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
* Kevin E. Martin <martin@valinux.com>
*
- * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/xf86drm.c,v 1.24 2001/08/18 02:51:13 dawes Exp $
+ * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/xf86drm.c,v 1.25 2001/08/27 17:40:59 dawes Exp $
*
*/
@@ -62,7 +62,7 @@
extern int xf86InstallSIGIOHandler(int fd, void (*f)(int, void *), void *);
extern int xf86RemoveSIGIOHandler(int fd);
# else
-# include <Xlibint.h>
+# include <X11/Xlibint.h>
# define _DRM_MALLOC Xmalloc
# define _DRM_FREE Xfree
# endif
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel
index e2630989..1060d4cb 100644
--- a/linux-core/Makefile.kernel
+++ b/linux-core/Makefile.kernel
@@ -3,13 +3,14 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
O_TARGET := drm.o
-list-multi := gamma.o tdfx.o r128.o mga.o i810.o ffb.o radeon.o
+list-multi := gamma.o tdfx.o r128.o mga.o i810.o i830.o ffb.o radeon.o
gamma-objs := gamma_drv.o gamma_dma.o
tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o
i810-objs := i810_drv.o i810_dma.o
+i830-objs := i830_drv.o i830_dma.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o
ffb-objs := ffb_drv.o ffb_context.o
@@ -19,6 +20,7 @@ obj-$(CONFIG_DRM_R128) += r128.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o
obj-$(CONFIG_DRM_MGA) += mga.o
obj-$(CONFIG_DRM_I810) += i810.o
+obj-$(CONFIG_DRM_I830) += i830.o
obj-$(CONFIG_DRM_FFB) += ffb.o
include $(TOPDIR)/Rules.make
@@ -35,6 +37,9 @@ mga.o: $(mga-objs) $(lib)
i810.o: $(i810-objs) $(lib)
$(LD) -r -o $@ $(i810-objs) $(lib)
+i830.o: $(i830-objs) $(lib)
+ $(LD) -r -o $@ $(i830-objs) $(lib)
+
r128.o: $(r128-objs) $(lib)
$(LD) -r -o $@ $(r128-objs) $(lib)
diff --git a/linux-core/ati_pcigart.c b/linux-core/ati_pcigart.c
index a794b3e0..5851b72f 100644
--- a/linux-core/ati_pcigart.c
+++ b/linux-core/ati_pcigart.c
@@ -36,7 +36,7 @@
#elif PAGE_SIZE == 4096
# define ATI_PCIGART_TABLE_ORDER 3
# define ATI_PCIGART_TABLE_PAGES (1 << 3)
-#elif
+#else
# error - PAGE_SIZE not 8K or 4K
#endif
@@ -57,7 +57,7 @@ static unsigned long DRM(ati_alloc_pcigart_table)( void )
page = virt_to_page( address );
- for ( i = 0 ; i <= ATI_PCIGART_TABLE_PAGES ; i++, page++ ) {
+ for ( i = 0 ; i < ATI_PCIGART_TABLE_PAGES ; i++, page++ ) {
atomic_inc( &page->count );
SetPageReserved( page );
}
@@ -74,7 +74,7 @@ static void DRM(ati_free_pcigart_table)( unsigned long address )
page = virt_to_page( address );
- for ( i = 0 ; i <= ATI_PCIGART_TABLE_PAGES ; i++, page++ ) {
+ for ( i = 0 ; i < ATI_PCIGART_TABLE_PAGES ; i++, page++ ) {
atomic_dec( &page->count );
ClearPageReserved( page );
}
@@ -103,7 +103,6 @@ int DRM(ati_pcigart_init)( drm_device_t *dev,
goto done;
}
-#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400)
if ( !dev->pdev ) {
DRM_ERROR( "PCI device unknown!\n" );
goto done;
@@ -118,9 +117,6 @@ int DRM(ati_pcigart_init)( drm_device_t *dev,
address = 0;
goto done;
}
-#else
- bus_address = virt_to_bus( (void *)address );
-#endif
pci_gart = (u32 *)address;
@@ -130,7 +126,6 @@ int DRM(ati_pcigart_init)( drm_device_t *dev,
memset( pci_gart, 0, ATI_MAX_PCIGART_PAGES * sizeof(u32) );
for ( i = 0 ; i < pages ; i++ ) {
-#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400)
/* we need to support large memory configurations */
entry->busaddr[i] = pci_map_single(dev->pdev,
page_address( entry->pagelist[i] ),
@@ -144,9 +139,7 @@ int DRM(ati_pcigart_init)( drm_device_t *dev,
goto done;
}
page_base = (u32) entry->busaddr[i];
-#else
- page_base = page_to_bus( entry->pagelist[i] );
-#endif
+
for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
*pci_gart++ = cpu_to_le32( page_base );
page_base += ATI_PCIGART_PAGE_SIZE;
@@ -155,7 +148,7 @@ int DRM(ati_pcigart_init)( drm_device_t *dev,
ret = 1;
-#if __i386__
+#if defined(__i386__) || defined(__x86_64__)
asm volatile ( "wbinvd" ::: "memory" );
#else
mb();
@@ -171,7 +164,6 @@ int DRM(ati_pcigart_cleanup)( drm_device_t *dev,
unsigned long addr,
dma_addr_t bus_addr)
{
-#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400)
drm_sg_mem_t *entry = dev->sg;
unsigned long pages;
int i;
@@ -197,8 +189,6 @@ int DRM(ati_pcigart_cleanup)( drm_device_t *dev,
}
}
-#endif
-
if ( addr ) {
DRM(ati_free_pcigart_table)( addr );
}
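For reference, the off-by-one fixes above reserve and release exactly ATI_PCIGART_TABLE_PAGES pages; the sizing behind those defines, assuming the constants used elsewhere in this file (ATI_PCIGART_PAGE_SIZE = 4096 and ATI_MAX_PCIGART_PAGES = 8192, neither visible in this hunk), works out as:

    /* GART table sizing:
     *   table bytes = ATI_MAX_PCIGART_PAGES * sizeof(u32)           = 32 KB
     *   table pages = 32 KB / 4 KB = 8  -> ATI_PCIGART_TABLE_ORDER 3
     *               = 32 KB / 8 KB = 4  -> ATI_PCIGART_TABLE_ORDER 2
     *   aperture    = ATI_MAX_PCIGART_PAGES * ATI_PCIGART_PAGE_SIZE = 32 MB
     *
     * Each u32 entry holds the little-endian bus address of one 4 KB GART
     * page, which is why the init loop writes PAGE_SIZE / ATI_PCIGART_PAGE_SIZE
     * entries for every mapped system page.
     */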
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index aac5c58a..4786ba9e 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -66,13 +66,8 @@
#include <linux/types.h>
#include <linux/agp_backend.h>
#endif
-#if LINUX_VERSION_CODE >= 0x020100 /* KERNEL_VERSION(2,1,0) */
#include <linux/tqueue.h>
#include <linux/poll.h>
-#endif
-#if LINUX_VERSION_CODE < 0x020400
-#include "compat-pre24.h"
-#endif
#include <asm/pgalloc.h>
#include "drm.h"
@@ -154,180 +149,7 @@
#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
- /* Backward compatibility section */
- /* _PAGE_WT changed to _PAGE_PWT in 2.2.6 */
-#ifndef _PAGE_PWT
-#define _PAGE_PWT _PAGE_WT
-#endif
- /* Wait queue declarations changed in 2.3.1 */
-#ifndef DECLARE_WAITQUEUE
-#define DECLARE_WAITQUEUE(w,c) struct wait_queue w = { c, NULL }
-typedef struct wait_queue *wait_queue_head_t;
-#define init_waitqueue_head(q) *q = NULL;
-#endif
-
- /* _PAGE_4M changed to _PAGE_PSE in 2.3.23 */
-#ifndef _PAGE_PSE
-#define _PAGE_PSE _PAGE_4M
-#endif
-
- /* vm_offset changed to vm_pgoff in 2.3.25 */
-#if LINUX_VERSION_CODE < 0x020319
-#define VM_OFFSET(vma) ((vma)->vm_offset)
-#else
#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT)
-#endif
-
- /* *_nopage return values defined in 2.3.26 */
-#ifndef NOPAGE_SIGBUS
-#define NOPAGE_SIGBUS 0
-#endif
-#ifndef NOPAGE_OOM
-#define NOPAGE_OOM 0
-#endif
-
- /* module_init/module_exit added in 2.3.13 */
-#ifndef module_init
-#define module_init(x) int init_module(void) { return x(); }
-#endif
-#ifndef module_exit
-#define module_exit(x) void cleanup_module(void) { x(); }
-#endif
-
- /* Generic cmpxchg added in 2.3.x */
-#ifndef __HAVE_ARCH_CMPXCHG
- /* Include this here so that driver can be
- used with older kernels. */
-#if defined(__alpha__)
-static __inline__ unsigned long
-__cmpxchg_u32(volatile int *m, int old, int new)
-{
- unsigned long prev, cmp;
-
- __asm__ __volatile__(
- "1: ldl_l %0,%5\n"
- " cmpeq %0,%3,%1\n"
- " beq %1,2f\n"
- " mov %4,%1\n"
- " stl_c %1,%2\n"
- " beq %1,3f\n"
- "2: mb\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- : "=&r"(prev), "=&r"(cmp), "=m"(*m)
- : "r"((long) old), "r"(new), "m"(*m)
- : "memory" );
-
- return prev;
-}
-
-static __inline__ unsigned long
-__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
-{
- unsigned long prev, cmp;
-
- __asm__ __volatile__(
- "1: ldq_l %0,%5\n"
- " cmpeq %0,%3,%1\n"
- " beq %1,2f\n"
- " mov %4,%1\n"
- " stq_c %1,%2\n"
- " beq %1,3f\n"
- "2: mb\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- : "=&r"(prev), "=&r"(cmp), "=m"(*m)
- : "r"((long) old), "r"(new), "m"(*m)
- : "memory" );
-
- return prev;
-}
-
-static __inline__ unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
-{
- switch (size) {
- case 4:
- return __cmpxchg_u32(ptr, old, new);
- case 8:
- return __cmpxchg_u64(ptr, old, new);
- }
- return old;
-}
-#define cmpxchg(ptr,o,n) \
- ({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
- (unsigned long)_n_, sizeof(*(ptr))); \
- })
-
-#elif __i386__
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- unsigned long prev;
- switch (size) {
- case 1:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 2:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 4:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- }
- return old;
-}
-
-#elif defined(__powerpc__)
-extern void __cmpxchg_called_with_bad_pointer(void);
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- unsigned long prev;
-
- switch (size) {
- case 4:
- __asm__ __volatile__(
- "sync;"
- "0: lwarx %0,0,%1 ;"
- " cmpl 0,%0,%3;"
- " bne 1f;"
- " stwcx. %2,0,%1;"
- " bne- 0b;"
- "1: "
- "sync;"
- : "=&r"(prev)
- : "r"(ptr), "r"(new), "r"(old)
- : "cr0", "memory");
- return prev;
- }
- __cmpxchg_called_with_bad_pointer();
- return old;
-}
-
-#endif /* i386, powerpc & alpha */
-
-#ifndef __alpha__
-#define cmpxchg(ptr,o,n) \
- ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o), \
- (unsigned long)(n),sizeof(*(ptr))))
-#endif
-
-#endif /* !__HAVE_ARCH_CMPXCHG */
/* Macros to make printk easier */
#define DRM_ERROR(fmt, arg...) \
@@ -621,6 +443,8 @@ typedef struct drm_agp_head {
int acquired;
unsigned long base;
int agp_mtrr;
+ int cant_use_aperture;
+ unsigned long page_mask;
} drm_agp_head_t;
#endif
@@ -629,9 +453,7 @@ typedef struct drm_sg_mem {
void *virtual;
int pages;
struct page **pagelist;
-#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400)
dma_addr_t *busaddr;
-#endif
} drm_sg_mem_t;
typedef struct drm_sigdata {
@@ -722,8 +544,8 @@ typedef struct drm_device {
#if __REALLY_HAVE_AGP
drm_agp_head_t *agp;
#endif
-#ifdef __alpha__
struct pci_dev *pdev;
+#ifdef __alpha__
#if LINUX_VERSION_CODE < 0x020403
struct pci_controler *hose;
#else
@@ -772,21 +594,6 @@ extern unsigned int DRM(poll)(struct file *filp,
struct poll_table_struct *wait);
/* Mapping support (drm_vm.h) */
-#if LINUX_VERSION_CODE < 0x020317
-extern unsigned long DRM(vm_nopage)(struct vm_area_struct *vma,
- unsigned long address,
- int write_access);
-extern unsigned long DRM(vm_shm_nopage)(struct vm_area_struct *vma,
- unsigned long address,
- int write_access);
-extern unsigned long DRM(vm_dma_nopage)(struct vm_area_struct *vma,
- unsigned long address,
- int write_access);
-extern unsigned long DRM(vm_sg_nopage)(struct vm_area_struct *vma,
- unsigned long address,
- int write_access);
-#else
- /* Return type changed in 2.3.23 */
extern struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
@@ -799,7 +606,6 @@ extern struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
extern struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
-#endif
extern void DRM(vm_open)(struct vm_area_struct *vma);
extern void DRM(vm_close)(struct vm_area_struct *vma);
extern void DRM(vm_shm_close)(struct vm_area_struct *vma);
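The large block removed above was a backward-compatibility shim for pre-2.4 kernels, including an open-coded cmpxchg for i386, Alpha and PowerPC; the core now relies on the kernel's own generic cmpxchg. For context, the main consumer inside the DRM core is the hardware-lock path, roughly as sketched below (field and flag names follow drm.h, but the body is illustrative, not lifted from this patch):

    /* Claim the heavyweight hardware lock by atomically swapping the
     * lock word; mark contention if someone else already holds it.
     */
    static int sketch_lock_take(__volatile__ unsigned int *lock,
                                unsigned int context)
    {
        unsigned int old, new, prev;

        do {
            old = *lock;
            if (old & _DRM_LOCK_HELD)
                new = old | _DRM_LOCK_CONT;      /* note the contention */
            else
                new = context | _DRM_LOCK_HELD;  /* take it for this context */
            prev = cmpxchg(lock, old, new);
        } while (prev != old);

        return !(old & _DRM_LOCK_HELD);          /* nonzero if we got the lock */
    }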
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index 66d1defc..fc0b29aa 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -35,12 +35,8 @@
#if __REALLY_HAVE_AGP
-#if LINUX_VERSION_CODE < 0x020400
-#include "agpsupport-pre24.h"
-#else
#define DRM_AGP_GET (drm_agp_t *)inter_module_get("drm_agp")
#define DRM_AGP_PUT inter_module_put("drm_agp")
-#endif
static const drm_agp_t *drm_agp = NULL;
@@ -271,22 +267,24 @@ drm_agp_head_t *DRM(agp_init)(void)
case INTEL_GX: head->chipset = "Intel 440GX"; break;
case INTEL_I810: head->chipset = "Intel i810"; break;
-#if LINUX_VERSION_CODE >= 0x020400
case INTEL_I815: head->chipset = "Intel i815"; break;
+#if LINUX_VERSION_CODE >= 0x020415
+ case INTEL_I820: head->chipset = "Intel i820"; break;
+#endif
case INTEL_I840: head->chipset = "Intel i840"; break;
- case INTEL_I850: head->chipset = "Intel i850"; break;
+#if LINUX_VERSION_CODE >= 0x020415
+ case INTEL_I845: head->chipset = "Intel i845"; break;
#endif
+ case INTEL_I850: head->chipset = "Intel i850"; break;
case VIA_GENERIC: head->chipset = "VIA"; break;
case VIA_VP3: head->chipset = "VIA VP3"; break;
case VIA_MVP3: head->chipset = "VIA MVP3"; break;
-#if LINUX_VERSION_CODE >= 0x020400
case VIA_MVP4: head->chipset = "VIA MVP4"; break;
case VIA_APOLLO_KX133: head->chipset = "VIA Apollo KX133";
break;
case VIA_APOLLO_KT133: head->chipset = "VIA Apollo KT133";
break;
-#endif
case VIA_APOLLO_PRO: head->chipset = "VIA Apollo Pro";
break;
@@ -316,6 +314,14 @@ drm_agp_head_t *DRM(agp_init)(void)
default: head->chipset = "Unknown"; break;
}
+#if LINUX_VERSION_CODE <= 0x020408
+ head->cant_use_aperture = 0;
+ head->page_mask = ~(0xfff);
+#else
+ head->cant_use_aperture = head->agp_info.cant_use_aperture;
+ head->page_mask = head->agp_info.page_mask;
+#endif
+
DRM_INFO("AGP %d.%d on %s @ 0x%08lx %ZuMB\n",
head->agp_info.version.major,
head->agp_info.version.minor,
diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c
index ec5dd543..3ff3527f 100644
--- a/linux-core/drm_bufs.c
+++ b/linux-core/drm_bufs.c
@@ -229,11 +229,7 @@ int DRM(rmmap)(struct inode *inode, struct file *filp,
DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);
for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
-#if LINUX_VERSION_CODE >= 0x020300
if (pt->vma->vm_private_data == map) found_maps++;
-#else
- if (pt->vma->vm_pte == map) found_maps++;
-#endif
}
if(!found_maps) {
diff --git a/linux-core/drm_context.c b/linux-core/drm_context.c
index eb4d61c8..e155946d 100644
--- a/linux-core/drm_context.c
+++ b/linux-core/drm_context.c
@@ -27,6 +27,10 @@
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
+ * ChangeLog:
+ * 2001-11-16 Torsten Duwe <duwe@caldera.de>
+ * added context constructor/destructor hooks,
+ * needed by SiS driver's memory management.
*/
#define __NO_VERSION__
@@ -181,7 +185,7 @@ int DRM(setsareactx)(struct inode *inode, struct file *filp,
drm_device_t *dev = priv->dev;
drm_ctx_priv_map_t request;
drm_map_t *map = NULL;
- drm_map_list_t *r_list;
+ drm_map_list_t *r_list = NULL;
struct list_head *list;
if (copy_from_user(&request,
@@ -316,6 +320,10 @@ int DRM(addctx)( struct inode *inode, struct file *filp,
/* Should this return -EBUSY instead? */
return -ENOMEM;
}
+#ifdef DRIVER_CTX_CTOR
+ if ( ctx.handle != DRM_KERNEL_CONTEXT )
+ DRIVER_CTX_CTOR(ctx.handle); /* XXX: also pass dev ? */
+#endif
if ( copy_to_user( (drm_ctx_t *)arg, &ctx, sizeof(ctx) ) )
return -EFAULT;
@@ -390,6 +398,9 @@ int DRM(rmctx)( struct inode *inode, struct file *filp,
priv->remove_auth_on_close = 1;
}
if ( ctx.handle != DRM_KERNEL_CONTEXT ) {
+#ifdef DRIVER_CTX_DTOR
+ DRIVER_CTX_DTOR(ctx.handle); /* XXX: also pass dev ? */
+#endif
DRM(ctxbitmap_free)( dev, ctx.handle );
}
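The constructor/destructor hooks added above are compile-time: the template only emits the calls when a driver defines DRIVER_CTX_CTOR / DRIVER_CTX_DTOR before the template headers are expanded. A sketch of how a driver such as SiS might wire them up (the sis_* names are assumptions, not taken from this patch):

    /* In the driver's template header, before drm_context.h is pulled in. */
    #define DRIVER_CTX_CTOR(handle)  sis_init_context(handle)
    #define DRIVER_CTX_DTOR(handle)  sis_final_context(handle)

    extern int sis_init_context(int context);   /* allocate per-context state */
    extern int sis_final_context(int context);  /* tear it down on rmctx */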
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index e1aff06d..23568288 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -113,7 +113,6 @@
#define DRIVER_IOCTLS
#endif
#ifndef DRIVER_FOPS
-#if LINUX_VERSION_CODE >= 0x020400
#define DRIVER_FOPS \
static struct file_operations DRM(fops) = { \
owner: THIS_MODULE, \
@@ -126,19 +125,6 @@ static struct file_operations DRM(fops) = { \
fasync: DRM(fasync), \
poll: DRM(poll), \
}
-#else
-#define DRIVER_FOPS \
-static struct file_operations DRM(fops) = { \
- open: DRM(open), \
- flush: DRM(flush), \
- release: DRM(release), \
- ioctl: DRM(ioctl), \
- mmap: DRM(mmap), \
- read: DRM(read), \
- fasync: DRM(fasync), \
- poll: DRM(poll), \
-}
-#endif
#endif
@@ -234,6 +220,9 @@ static char *drm_opts = NULL;
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_PARM( drm_opts, "s" );
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("GPL and additional rights");
+#endif
static int DRM(setup)( drm_device_t *dev )
{
@@ -731,9 +720,6 @@ int DRM(open)( struct inode *inode, struct file *filp )
retcode = DRM(open_helper)( inode, filp, dev );
if ( !retcode ) {
-#if LINUX_VERSION_CODE < 0x020333
- MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
-#endif
atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
spin_lock( &dev->count_lock );
if ( !dev->open_count++ ) {
@@ -852,9 +838,6 @@ int DRM(release)( struct inode *inode, struct file *filp )
* End inline drm_release
*/
-#if LINUX_VERSION_CODE < 0x020333
- MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
-#endif
atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
spin_lock( &dev->count_lock );
if ( !--dev->open_count ) {
diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c
index 3656c5e9..9c2135fe 100644
--- a/linux-core/drm_fops.c
+++ b/linux-core/drm_fops.c
@@ -125,21 +125,31 @@ ssize_t DRM(read)(struct file *filp, char *buf, size_t count, loff_t *off)
int avail;
int send;
int cur;
+ DECLARE_WAITQUEUE(wait, current);
DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp);
+ add_wait_queue(&dev->buf_readers, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
while (dev->buf_rp == dev->buf_wp) {
DRM_DEBUG(" sleeping\n");
if (filp->f_flags & O_NONBLOCK) {
+ remove_wait_queue(&dev->buf_readers, &wait);
+ set_current_state(TASK_RUNNING);
return -EAGAIN;
}
- interruptible_sleep_on(&dev->buf_readers);
+ schedule(); /* wait for dev->buf_readers */
if (signal_pending(current)) {
DRM_DEBUG(" interrupted\n");
+ remove_wait_queue(&dev->buf_readers, &wait);
+ set_current_state(TASK_RUNNING);
return -ERESTARTSYS;
}
DRM_DEBUG(" awake\n");
+ set_current_state(TASK_INTERRUPTIBLE);
}
+ remove_wait_queue(&dev->buf_readers, &wait);
+ set_current_state(TASK_RUNNING);
left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
avail = DRM_BSZ - left;
@@ -191,24 +201,8 @@ int DRM(write_string)(drm_device_t *dev, const char *s)
send -= count;
}
-#if LINUX_VERSION_CODE < 0x020315 && !defined(KILLFASYNCHASTHREEPARAMETERS)
- /* The extra parameter to kill_fasync was added in 2.3.21, and is
- _not_ present in _stock_ 2.2.14 and 2.2.15. However, some
- distributions patch 2.2.x kernels to add this parameter. The
- Makefile.linux attempts to detect this addition and defines
- KILLFASYNCHASTHREEPARAMETERS if three parameters are found. */
- if (dev->buf_async) kill_fasync(dev->buf_async, SIGIO);
-#else
-
- /* Parameter added in 2.3.21. */
-#if LINUX_VERSION_CODE < 0x020400
- if (dev->buf_async) kill_fasync(dev->buf_async, SIGIO, POLL_IN);
-#else
- /* Type of first parameter changed in
- Linux 2.4.0-test2... */
if (dev->buf_async) kill_fasync(&dev->buf_async, SIGIO, POLL_IN);
-#endif
-#endif
+
DRM_DEBUG("waking\n");
wake_up_interruptible(&dev->buf_readers);
return 0;
diff --git a/linux-core/drm_ioctl.c b/linux-core/drm_ioctl.c
index adcb22c8..0d8a1259 100644
--- a/linux-core/drm_ioctl.c
+++ b/linux-core/drm_ioctl.c
@@ -98,7 +98,6 @@ int DRM(setunique)(struct inode *inode, struct file *filp,
}
sprintf(dev->devname, "%s@%s", dev->name, dev->unique);
-#ifdef __alpha__
do {
struct pci_dev *pci_dev;
int b, d, f;
@@ -116,10 +115,11 @@ int DRM(setunique)(struct inode *inode, struct file *filp,
pci_dev = pci_find_slot(b, PCI_DEVFN(d,f));
if (pci_dev) {
dev->pdev = pci_dev;
+#ifdef __alpha__
dev->hose = pci_dev->sysdata;
+#endif
}
} while(0);
-#endif
return 0;
}
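With the #ifdef __alpha__ guard dropped, DRM(setunique) now resolves dev->pdev on every architecture by parsing the bus id string supplied by userspace, conventionally of the form "PCI:bus:slot:function" (the parsing itself is outside this hunk). The userspace half of that handshake, sketched with the libdrm wrapper from xf86drm.c (the bus id value is only an example):

    #include "xf86drm.h"

    /* Label the opened DRM device with its PCI location so the kernel
     * can look up the matching struct pci_dev.
     */
    static int label_device(int fd)
    {
        return drmSetBusid(fd, "PCI:1:0:0");   /* bus 1, slot 0, function 0 */
    }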
diff --git a/linux-core/drm_memory.h b/linux-core/drm_memory.h
index 498937d4..f11b80c3 100644
--- a/linux-core/drm_memory.h
+++ b/linux-core/drm_memory.h
@@ -85,12 +85,7 @@ void DRM(mem_init)(void)
}
si_meminfo(&si);
-#if LINUX_VERSION_CODE < 0x020317
- /* Changed to page count in 2.3.23 */
- DRM(ram_available) = si.totalram >> PAGE_SHIFT;
-#else
DRM(ram_available) = si.totalram;
-#endif
DRM(ram_used) = 0;
}
@@ -257,12 +252,7 @@ unsigned long DRM(alloc_pages)(int order, int area)
for (addr = address, sz = bytes;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
-#if LINUX_VERSION_CODE >= 0x020400
- /* Argument type changed in 2.4.0-test6/pre8 */
mem_map_reserve(virt_to_page(addr));
-#else
- mem_map_reserve(MAP_NR(addr));
-#endif
}
return address;
@@ -283,12 +273,7 @@ void DRM(free_pages)(unsigned long address, int order, int area)
for (addr = address, sz = bytes;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
-#if LINUX_VERSION_CODE >= 0x020400
- /* Argument type changed in 2.4.0-test6/pre8 */
mem_map_unreserve(virt_to_page(addr));
-#else
- mem_map_unreserve(MAP_NR(addr));
-#endif
}
free_pages(address, order);
}
diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c
index f65f42b7..24e8556f 100644
--- a/linux-core/drm_proc.c
+++ b/linux-core/drm_proc.c
@@ -186,7 +186,7 @@ static int DRM(_vm_info)(char *buf, char **start, off_t offset, int request,
DRM_PROC_PRINT("slot offset size type flags "
"address mtrr\n\n");
i = 0;
- list_for_each(list, &dev->maplist->head) {
+ if (dev->maplist != NULL) list_for_each(list, &dev->maplist->head) {
r_list = (drm_map_list_t *)list;
map = r_list->map;
if(!map) continue;
diff --git a/linux-core/drm_scatter.c b/linux-core/drm_scatter.c
index 66d34968..a6b8275f 100644
--- a/linux-core/drm_scatter.c
+++ b/linux-core/drm_scatter.c
@@ -47,11 +47,9 @@ void DRM(sg_cleanup)( drm_sg_mem_t *entry )
vfree( entry->virtual );
-#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400)
DRM(free)( entry->busaddr,
entry->pages * sizeof(*entry->busaddr),
DRM_MEM_PAGES );
-#endif
DRM(free)( entry->pagelist,
entry->pages * sizeof(*entry->pagelist),
DRM_MEM_PAGES );
@@ -99,7 +97,6 @@ int DRM(sg_alloc)( struct inode *inode, struct file *filp,
return -ENOMEM;
}
-#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400)
entry->busaddr = DRM(alloc)( pages * sizeof(*entry->busaddr),
DRM_MEM_PAGES );
if ( !entry->busaddr ) {
@@ -112,15 +109,12 @@ int DRM(sg_alloc)( struct inode *inode, struct file *filp,
return -ENOMEM;
}
memset( (void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr) );
-#endif
entry->virtual = vmalloc_32( pages << PAGE_SHIFT );
if ( !entry->virtual ) {
-#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400)
DRM(free)( entry->busaddr,
entry->pages * sizeof(*entry->busaddr),
DRM_MEM_PAGES );
-#endif
DRM(free)( entry->pagelist,
entry->pages * sizeof(*entry->pagelist),
DRM_MEM_PAGES );
diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c
index fe8a1bc9..ead5b959 100644
--- a/linux-core/drm_stub.c
+++ b/linux-core/drm_stub.c
@@ -31,10 +31,6 @@
#define __NO_VERSION__
#include "drmP.h"
-#if LINUX_VERSION_CODE < 0x020400
-#include "stubsupport-pre24.h"
-#endif
-
#define DRM_STUB_MAXCARDS 16 /* Enough for one machine */
static struct drm_stub_list {
@@ -70,9 +66,7 @@ static int DRM(stub_open)(struct inode *inode, struct file *filp)
}
static struct file_operations DRM(stub_fops) = {
-#if LINUX_VERSION_CODE >= 0x020400
owner: THIS_MODULE,
-#endif
open: DRM(stub_open)
};
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index d9ee0ff4..5fd3571a 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -56,18 +56,11 @@ struct vm_operations_struct DRM(vm_sg_ops) = {
close: DRM(vm_close),
};
-#if LINUX_VERSION_CODE < 0x020317
-unsigned long DRM(vm_nopage)(struct vm_area_struct *vma,
- unsigned long address,
- int write_access)
-#else
- /* Return type changed in 2.3.23 */
struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access)
-#endif
{
-#if defined(__alpha__) && __REALLY_HAVE_AGP
+#if __REALLY_HAVE_AGP
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
drm_map_t *map = NULL;
@@ -77,6 +70,9 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
/*
* Find the right map
*/
+
+ if(!dev->agp->cant_use_aperture) goto vm_nopage_error;
+
list_for_each(list, &dev->maplist->head) {
r_list = (drm_map_list_t *)list;
map = r_list->map;
@@ -90,10 +86,12 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
struct drm_agp_mem *agpmem;
struct page *page;
+#if __alpha__
/*
- * Make it a bus-relative address
+ * Adjust to a bus-relative address
*/
baddr -= dev->hose->mem_space->start;
+#endif
/*
* It's AGP memory - find the real physical page to map
@@ -104,50 +102,32 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
break;
}
- if (!agpmem) {
- /*
- * Oops - no memory found
- */
- return NOPAGE_SIGBUS; /* couldn't find it */
- }
+ if (!agpmem) goto vm_nopage_error;
/*
* Get the page, inc the use count, and return it
*/
offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
- agpmem->memory->memory[offset] &= ~1UL; /* HACK */
+ agpmem->memory->memory[offset] &= dev->agp->page_mask;
page = virt_to_page(__va(agpmem->memory->memory[offset]));
-#if 0
- DRM_ERROR("baddr = 0x%lx page = 0x%lx, offset = 0x%lx\n",
- baddr, __va(agpmem->memory->memory[offset]), offset);
-#endif
get_page(page);
-#if LINUX_VERSION_CODE < 0x020317
- return page_address(page);
-#else
+
+ DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx\n",
+ baddr, __va(agpmem->memory->memory[offset]), offset);
+
return page;
-#endif
}
-#endif
+vm_nopage_error:
+#endif /* __REALLY_HAVE_AGP */
+
return NOPAGE_SIGBUS; /* Disallow mremap */
}
-#if LINUX_VERSION_CODE < 0x020317
-unsigned long DRM(vm_shm_nopage)(struct vm_area_struct *vma,
- unsigned long address,
- int write_access)
-#else
- /* Return type changed in 2.3.23 */
struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access)
-#endif
{
-#if LINUX_VERSION_CODE >= 0x020300
drm_map_t *map = (drm_map_t *)vma->vm_private_data;
-#else
- drm_map_t *map = (drm_map_t *)vma->vm_pte;
-#endif
unsigned long offset;
unsigned long i;
pgd_t *pgd;
@@ -174,11 +154,7 @@ struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
get_page(page);
DRM_DEBUG("0x%08lx => 0x%08x\n", address, page_to_bus(page));
-#if LINUX_VERSION_CODE < 0x020317
- return page_address(page);
-#else
return page;
-#endif
}
/* Special close routine which deletes map information if we are the last
@@ -197,25 +173,14 @@ void DRM(vm_shm_close)(struct vm_area_struct *vma)
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
-#if LINUX_VERSION_CODE < 0x020333
- MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
-#endif
atomic_dec(&dev->vma_count);
-#if LINUX_VERSION_CODE >= 0x020300
map = vma->vm_private_data;
-#else
- map = vma->vm_pte;
-#endif
down(&dev->struct_sem);
for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
next = pt->next;
-#if LINUX_VERSION_CODE >= 0x020300
if (pt->vma->vm_private_data == map) found_maps++;
-#else
- if (pt->vma->vm_pte == map) found_maps++;
-#endif
if (pt->vma == vma) {
if (prev) {
prev->next = pt->next;
@@ -268,16 +233,9 @@ void DRM(vm_shm_close)(struct vm_area_struct *vma)
up(&dev->struct_sem);
}
-#if LINUX_VERSION_CODE < 0x020317
-unsigned long DRM(vm_dma_nopage)(struct vm_area_struct *vma,
- unsigned long address,
- int write_access)
-#else
- /* Return type changed in 2.3.23 */
struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access)
-#endif
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
@@ -299,29 +257,14 @@ struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
DRM_DEBUG("0x%08lx (page %lu) => 0x%08x\n", address, page_nr,
page_to_bus(page));
-#if LINUX_VERSION_CODE < 0x020317
- return page_address(page);
-#else
return page;
-#endif
}
-#if LINUX_VERSION_CODE < 0x020317
-unsigned long DRM(vm_sg_nopage)(struct vm_area_struct *vma,
- unsigned long address,
- int write_access)
-#else
- /* Return type changed in 2.3.23 */
struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access)
-#endif
{
-#if LINUX_VERSION_CODE >= 0x020300
drm_map_t *map = (drm_map_t *)vma->vm_private_data;
-#else
- drm_map_t *map = (drm_map_t *)vma->vm_pte;
-#endif
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
drm_sg_mem_t *entry = dev->sg;
@@ -341,11 +284,7 @@ struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
page = entry->pagelist[page_offset];
get_page(page);
-#if LINUX_VERSION_CODE < 0x020317
- return page_address(page);
-#else
return page;
-#endif
}
void DRM(vm_open)(struct vm_area_struct *vma)
@@ -357,10 +296,6 @@ void DRM(vm_open)(struct vm_area_struct *vma)
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
atomic_inc(&dev->vma_count);
-#if LINUX_VERSION_CODE < 0x020333
- /* The map can exist after the fd is closed. */
- MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
-#endif
vma_entry = DRM(alloc)(sizeof(*vma_entry), DRM_MEM_VMAS);
if (vma_entry) {
@@ -381,9 +316,6 @@ void DRM(vm_close)(struct vm_area_struct *vma)
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
-#if LINUX_VERSION_CODE < 0x020333
- MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
-#endif
atomic_dec(&dev->vma_count);
down(&dev->struct_sem);
@@ -422,13 +354,13 @@ int DRM(mmap_dma)(struct file *filp, struct vm_area_struct *vma)
unlock_kernel();
vma->vm_ops = &DRM(vm_dma_ops);
- vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
-#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
- /* In Linux 2.2.3 and above, this is
- handled in do_mmap() in mm/mmap.c. */
- ++filp->f_count;
+#if LINUX_VERSION_CODE <= 0x020414
+ vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
+#else
+ vma->vm_flags |= VM_RESERVED; /* Don't swap */
#endif
+
vma->vm_file = filp; /* Needed for drm_vm_open() */
DRM(vm_open)(vma);
return 0;
@@ -543,34 +475,33 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
break;
case _DRM_SHM:
vma->vm_ops = &DRM(vm_shm_ops);
-#if LINUX_VERSION_CODE >= 0x020300
vma->vm_private_data = (void *)map;
-#else
- vma->vm_pte = (unsigned long)map;
-#endif
/* Don't let this area swap. Change when
DRM_KERNEL advisory is supported. */
+#if LINUX_VERSION_CODE <= 0x020414
vma->vm_flags |= VM_LOCKED;
+#else
+ vma->vm_flags |= VM_RESERVED;
+#endif
break;
case _DRM_SCATTER_GATHER:
vma->vm_ops = &DRM(vm_sg_ops);
-#if LINUX_VERSION_CODE >= 0x020300
vma->vm_private_data = (void *)map;
+#if LINUX_VERSION_CODE <= 0x020414
+ vma->vm_flags |= VM_LOCKED;
#else
- vma->vm_pte = (unsigned long)map;
+ vma->vm_flags |= VM_RESERVED;
#endif
- vma->vm_flags |= VM_LOCKED;
break;
default:
return -EINVAL; /* This should never happen. */
}
+#if LINUX_VERSION_CODE <= 0x020414
vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
-
-#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
- /* In Linux 2.2.3 and above, this is
- handled in do_mmap() in mm/mmap.c. */
- ++filp->f_count;
+#else
+ vma->vm_flags |= VM_RESERVED; /* Don't swap */
#endif
+
vma->vm_file = filp; /* Needed for drm_vm_open() */
DRM(vm_open)(vma);
return 0;
diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c
index d6933d57..ae0a3616 100644
--- a/linux-core/i810_dma.c
+++ b/linux-core/i810_dma.c
@@ -86,6 +86,7 @@ static inline void i810_print_status_page(drm_device_t *dev)
DRM_DEBUG( "hw_status: LpRing Head ptr : %x\n", temp[1]);
DRM_DEBUG( "hw_status: IRing Head ptr : %x\n", temp[2]);
DRM_DEBUG( "hw_status: Reserved : %x\n", temp[3]);
+ DRM_DEBUG( "hw_status: Last Render: %x\n", temp[4]);
DRM_DEBUG( "hw_status: Driver Counter : %d\n", temp[5]);
for(i = 6; i < dma->buf_count + 6; i++) {
DRM_DEBUG( "buffer status idx : %d used: %d\n", i - 6, temp[i]);
@@ -227,14 +228,9 @@ static int i810_unmap_buffer(drm_buf_t *buf)
#else
down_write( &current->mm->mmap_sem );
#endif
-#if LINUX_VERSION_CODE < 0x020399
- retcode = do_munmap((unsigned long)buf_priv->virtual,
- (size_t) buf->total);
-#else
retcode = do_munmap(current->mm,
(unsigned long)buf_priv->virtual,
(size_t) buf->total);
-#endif
#if LINUX_VERSION_CODE <= 0x020402
up( &current->mm->mmap_sem );
#else
@@ -476,6 +472,9 @@ static int i810_dma_initialize(drm_device_t *dev,
dev_priv->back_offset = init->back_offset;
dev_priv->depth_offset = init->depth_offset;
+ dev_priv->overlay_offset = init->overlay_offset;
+ dev_priv->overlay_physical = init->overlay_physical;
+
dev_priv->front_di1 = init->front_offset | init->pitch_bits;
dev_priv->back_di1 = init->back_offset | init->pitch_bits;
dev_priv->zi1 = init->depth_offset | init->pitch_bits;
@@ -1264,3 +1263,156 @@ int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd,
if(VM_DONTCOPY == 0) return 1;
return 0;
}
+
+static void i810_dma_dispatch_mc(drm_device_t *dev, drm_buf_t *buf, int used,
+ unsigned int last_render)
+{
+ drm_i810_private_t *dev_priv = dev->dev_private;
+ drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+ drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned long address = (unsigned long)buf->bus_address;
+ unsigned long start = address - dev->agp->base;
+ int u;
+ RING_LOCALS;
+
+ i810_kernel_lost_context(dev);
+
+ u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
+ I810_BUF_HARDWARE);
+ if(u != I810_BUF_CLIENT) {
+ DRM_DEBUG("MC found buffer that isn't mine!\n");
+ }
+
+ if (used > 4*1024)
+ used = 0;
+
+ sarea_priv->dirty = 0x7f;
+
+ DRM_DEBUG("dispatch mc addr 0x%lx, used 0x%x\n",
+ address, used);
+
+ dev_priv->counter++;
+ DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
+ DRM_DEBUG("i810_dma_dispatch_mc\n");
+ DRM_DEBUG("start : %lx\n", start);
+ DRM_DEBUG("used : %d\n", used);
+ DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
+
+ if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
+ if (used & 4) {
+ *(u32 *)((u32)buf_priv->virtual + used) = 0;
+ used += 4;
+ }
+
+ i810_unmap_buffer(buf);
+ }
+ BEGIN_LP_RING(4);
+ OUT_RING( CMD_OP_BATCH_BUFFER );
+ OUT_RING( start | BB1_PROTECTED );
+ OUT_RING( start + used - 4 );
+ OUT_RING( 0 );
+ ADVANCE_LP_RING();
+
+
+ BEGIN_LP_RING(8);
+ OUT_RING( CMD_STORE_DWORD_IDX );
+ OUT_RING( buf_priv->my_use_idx );
+ OUT_RING( I810_BUF_FREE );
+ OUT_RING( 0 );
+
+ OUT_RING( CMD_STORE_DWORD_IDX );
+ OUT_RING( 16 );
+ OUT_RING( last_render );
+ OUT_RING( 0 );
+ ADVANCE_LP_RING();
+}
+
+int i810_dma_mc(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
+ u32 *hw_status = (u32 *)dev_priv->hw_status_page;
+ drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
+ dev_priv->sarea_priv;
+ drm_i810_mc_t mc;
+
+ if (copy_from_user(&mc, (drm_i810_mc_t *)arg, sizeof(mc)))
+ return -EFAULT;
+
+
+ if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("i810_dma_mc called without lock held\n");
+ return -EINVAL;
+ }
+
+ i810_dma_dispatch_mc(dev, dma->buflist[mc.idx], mc.used,
+ mc.last_render );
+
+ atomic_add(mc.used, &dev->counts[_DRM_STAT_SECONDARY]);
+ atomic_inc(&dev->counts[_DRM_STAT_DMA]);
+ sarea_priv->last_enqueue = dev_priv->counter-1;
+ sarea_priv->last_dispatch = (int) hw_status[5];
+
+ return 0;
+}
+
+int i810_rstatus(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
+
+ return (int)(((u32 *)(dev_priv->hw_status_page))[4]);
+}
+
+int i810_ov0_info(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
+ drm_i810_overlay_t data;
+
+ data.offset = dev_priv->overlay_offset;
+ data.physical = dev_priv->overlay_physical;
+ copy_to_user((drm_i810_overlay_t *)arg,&data,sizeof(data));
+ return 0;
+}
+
+int i810_fstatus(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
+
+ if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("i810_fstatus called without lock held\n");
+ return -EINVAL;
+ }
+ return I810_READ(0x30008);
+}
+
+int i810_ov0_flip(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
+
+ if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("i810_ov0_flip called without lock held\n");
+ return -EINVAL;
+ }
+
+	/* Tell the overlay to update */
+ I810_WRITE(0x30000,dev_priv->overlay_physical | 0x80000000);
+
+ return 0;
+}
+
+
diff --git a/linux-core/i810_drm.h b/linux-core/i810_drm.h
index 5d47adda..8413293b 100644
--- a/linux-core/i810_drm.h
+++ b/linux-core/i810_drm.h
@@ -112,6 +112,8 @@ typedef struct _drm_i810_init {
unsigned int front_offset;
unsigned int back_offset;
unsigned int depth_offset;
+ unsigned int overlay_offset;
+ unsigned int overlay_physical;
unsigned int w;
unsigned int h;
unsigned int pitch;
@@ -196,4 +198,18 @@ typedef struct drm_i810_dma {
int granted;
} drm_i810_dma_t;
+typedef struct _drm_i810_overlay_t {
+ unsigned int offset; /* Address of the Overlay Regs */
+ unsigned int physical;
+} drm_i810_overlay_t;
+
+typedef struct _drm_i810_mc {
+ int idx; /* buffer index */
+ int used; /* nr bytes in use */
+ int num_blocks; /* number of GFXBlocks */
+ int *length; /* List of lengths for GFXBlocks (FUTURE)*/
+ unsigned int last_render; /* Last Render Request */
+} drm_i810_mc_t;
+
+
#endif /* _I810_DRM_H_ */
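drm_i810_mc_t is the argument block for the new DRM_IOCTL_I810_MC path added in i810_dma.c above; the num_blocks/length pair is reserved for future use, and the ioctl must be issued with the DRM lock held, as the kernel-side check enforces. A minimal client-side sketch (the request macro lives in i810_drm.h outside this hunk; the values are illustrative):

    #include <sys/ioctl.h>
    #include "i810_drm.h"

    static int queue_mc_buffer(int fd, int buf_idx, int bytes_used,
                               unsigned int render_seq)
    {
        drm_i810_mc_t mc;

        mc.idx         = buf_idx;     /* DMA buffer holding the commands   */
        mc.used        = bytes_used;  /* bytes actually written into it    */
        mc.num_blocks  = 0;           /* GFXBlock list not used yet        */
        mc.length      = NULL;
        mc.last_render = render_seq;  /* echoed back via the status page   */

        return ioctl(fd, DRM_IOCTL_I810_MC, &mc);
    }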
diff --git a/linux-core/i810_drv.c b/linux-core/i810_drv.c
index a228cb46..559b2617 100644
--- a/linux-core/i810_drv.c
+++ b/linux-core/i810_drv.c
@@ -39,10 +39,10 @@
#define DRIVER_NAME "i810"
#define DRIVER_DESC "Intel i810"
-#define DRIVER_DATE "20010616"
+#define DRIVER_DATE "20010920"
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 1
+#define DRIVER_MINOR 2
#define DRIVER_PATCHLEVEL 0
#define DRIVER_IOCTLS \
@@ -54,7 +54,12 @@
[DRM_IOCTL_NR(DRM_IOCTL_I810_GETBUF)] = { i810_getbuf, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_SWAP)] = { i810_swap_bufs, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_COPY)] = { i810_copybuf, 1, 0 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_I810_DOCOPY)] = { i810_docopy, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_I810_DOCOPY)] = { i810_docopy, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I810_OV0INFO)] = { i810_ov0_info, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I810_FSTATUS)] = { i810_fstatus, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I810_OV0FLIP)] = { i810_ov0_flip, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I810_MC)] = { i810_dma_mc, 1, 1 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I810_RSTATUS)] = { i810_rstatus, 1, 0 }
#define __HAVE_COUNTERS 4
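
Each DRIVER_IOCTLS entry is a { handler, auth, root } triple; the exact field names live in this tree's drmP.h, but they amount to "must be an authenticated client" and "must additionally be root". Note that DRM_IOCTL_I810_MC is registered { i810_dma_mc, 1, 1 } while the other new entries are { handler, 1, 0 }. A rough sketch of how the core dispatch would interpret those two flags (illustrative only, not the actual drm_ioctl code):

struct ioctl_flags {
	int auth_needed;	/* caller must be an authenticated DRM client */
	int root_only;		/* caller must also have root privileges */
};

static int may_call(const struct ioctl_flags *f, int authenticated, int is_root)
{
	if (f->auth_needed && !authenticated)
		return 0;
	if (f->root_only && !is_root)
		return 0;
	return 1;
}
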
diff --git a/linux-core/i810_drv.h b/linux-core/i810_drv.h
index e1b17148..a27384d4 100644
--- a/linux-core/i810_drv.h
+++ b/linux-core/i810_drv.h
@@ -73,6 +73,8 @@ typedef struct drm_i810_private {
int back_offset;
int depth_offset;
+ int overlay_offset;
+ int overlay_physical;
int w, h;
int pitch;
} drm_i810_private_t;
@@ -94,6 +96,18 @@ extern int i810_copybuf(struct inode *inode, struct file *filp,
extern int i810_docopy(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
+extern int i810_rstatus(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int i810_ov0_info(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int i810_fstatus(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int i810_ov0_flip(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int i810_dma_mc(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+
extern void i810_dma_quiescent(drm_device_t *dev);
#define I810_VERBOSE 0
diff --git a/linux-core/i830_dma.c b/linux-core/i830_dma.c
new file mode 100644
index 00000000..661987fb
--- /dev/null
+++ b/linux-core/i830_dma.c
@@ -0,0 +1,1418 @@
+/* i830_dma.c -- DMA support for the I830 -*- linux-c -*-
+ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Jeff Hartmann <jhartmann@valinux.com>
+ * Keith Whitwell <keithw@valinux.com>
+ * Abraham vd Merwe <abraham@2d3d.co.za>
+ *
+ */
+
+#define __NO_VERSION__
+#include "i830.h"
+#include "drmP.h"
+#include "i830_drv.h"
+#include <linux/interrupt.h> /* For task queue support */
+
+/* in case we don't have a 2.3.99-pre6 kernel or later: */
+#ifndef VM_DONTCOPY
+#define VM_DONTCOPY 0
+#endif
+
+#define I830_BUF_FREE 2
+#define I830_BUF_CLIENT 1
+#define I830_BUF_HARDWARE 0
+
+#define I830_BUF_UNMAPPED 0
+#define I830_BUF_MAPPED 1
+
+#define RING_LOCALS unsigned int outring, ringmask; volatile char *virt;
+
+
+#define DO_IDLE_WORKAROUND() \
+do { \
+ int _head; \
+ int _tail; \
+ int _i; \
+ do { \
+ _head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR; \
+ _tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR; \
+ for(_i = 0; _i < 65535; _i++); \
+ } while(_head != _tail); \
+} while(0)
+
+#define I830_SYNC_WORKAROUND 0
+
+#define BEGIN_LP_RING(n) do { \
+ if (I830_VERBOSE) \
+ DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", \
+ n, __FUNCTION__); \
+ if (I830_SYNC_WORKAROUND) \
+ DO_IDLE_WORKAROUND(); \
+ if (dev_priv->ring.space < n*4) \
+ i830_wait_ring(dev, n*4); \
+ dev_priv->ring.space -= n*4; \
+ outring = dev_priv->ring.tail; \
+ ringmask = dev_priv->ring.tail_mask; \
+ virt = dev_priv->ring.virtual_start; \
+} while (0)
+
+#define ADVANCE_LP_RING() do { \
+ if (I830_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \
+ dev_priv->ring.tail = outring; \
+ I830_WRITE(LP_RING + RING_TAIL, outring); \
+} while(0)
+
+#define OUT_RING(n) do { \
+ if (I830_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
+ *(volatile unsigned int *)(virt + outring) = n; \
+ outring += 4; \
+ outring &= ringmask; \
+} while (0)
+
+static inline void i830_print_status_page(drm_device_t *dev)
+{
+ drm_device_dma_t *dma = dev->dma;
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ u32 *temp = (u32 *)dev_priv->hw_status_page;
+ int i;
+
+ DRM_DEBUG( "hw_status: Interrupt Status : %x\n", temp[0]);
+ DRM_DEBUG( "hw_status: LpRing Head ptr : %x\n", temp[1]);
+ DRM_DEBUG( "hw_status: IRing Head ptr : %x\n", temp[2]);
+ DRM_DEBUG( "hw_status: Reserved : %x\n", temp[3]);
+ DRM_DEBUG( "hw_status: Driver Counter : %d\n", temp[5]);
+ for(i = 9; i < dma->buf_count + 9; i++) {
+ DRM_DEBUG( "buffer status idx : %d used: %d\n", i - 9, temp[i]);
+ }
+}
+
+static drm_buf_t *i830_freelist_get(drm_device_t *dev)
+{
+ drm_device_dma_t *dma = dev->dma;
+ int i;
+ int used;
+
+ /* Linear search might not be the best solution */
+
+ for (i = 0; i < dma->buf_count; i++) {
+ drm_buf_t *buf = dma->buflist[ i ];
+ drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+ /* In use is already a pointer */
+ used = cmpxchg(buf_priv->in_use, I830_BUF_FREE,
+ I830_BUF_CLIENT);
+ if(used == I830_BUF_FREE) {
+ return buf;
+ }
+ }
+ return NULL;
+}
+
+/* This should only be called if the buffer has not yet been sent to the
+ * hardware; the hardware updates in_use for us once it's on the ring buffer.
+ */
+
+static int i830_freelist_put(drm_device_t *dev, drm_buf_t *buf)
+{
+ drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+ int used;
+
+ /* In use is already a pointer */
+ used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, I830_BUF_FREE);
+ if(used != I830_BUF_CLIENT) {
+		DRM_ERROR("Freeing buffer that's not in use: %d\n", buf->idx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
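
/* Editorial sketch, not part of the patch: the in_use word in the status
 * page acts as a small lock-free state machine.  i830_freelist_get() moves
 * a buffer FREE -> CLIENT, dispatch with the discard flag moves it
 * CLIENT -> HARDWARE, and the CMD_STORE_DWORD_IDX emitted at dispatch time
 * makes the hardware itself store I830_BUF_FREE back once the buffer has
 * been consumed.  Because cmpxchg() only succeeds from the expected state,
 * a stale or double free is reported instead of corrupting the freelist.
 */
static inline int i830_try_claim(u32 *in_use)
{
	/* non-zero only if we made the FREE -> CLIENT transition ourselves */
	return cmpxchg(in_use, I830_BUF_FREE, I830_BUF_CLIENT) == I830_BUF_FREE;
}
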
+
+static struct file_operations i830_buffer_fops = {
+ open: DRM(open),
+ flush: DRM(flush),
+ release: DRM(release),
+ ioctl: DRM(ioctl),
+ mmap: i830_mmap_buffers,
+ read: DRM(read),
+ fasync: DRM(fasync),
+ poll: DRM(poll),
+};
+
+int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev;
+ drm_i830_private_t *dev_priv;
+ drm_buf_t *buf;
+ drm_i830_buf_priv_t *buf_priv;
+
+ lock_kernel();
+ dev = priv->dev;
+ dev_priv = dev->dev_private;
+ buf = dev_priv->mmap_buffer;
+ buf_priv = buf->dev_private;
+
+ vma->vm_flags |= (VM_IO | VM_DONTCOPY);
+ vma->vm_file = filp;
+
+ buf_priv->currently_mapped = I830_BUF_MAPPED;
+ unlock_kernel();
+
+ if (remap_page_range(vma->vm_start,
+ VM_OFFSET(vma),
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot)) return -EAGAIN;
+ return 0;
+}
+
+static int i830_map_buffer(drm_buf_t *buf, struct file *filp)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ struct file_operations *old_fops;
+ int retcode = 0;
+
+ if(buf_priv->currently_mapped == I830_BUF_MAPPED) return -EINVAL;
+
+ if(VM_DONTCOPY != 0) {
+#if LINUX_VERSION_CODE <= 0x020402
+ down( &current->mm->mmap_sem );
+#else
+ down_write( &current->mm->mmap_sem );
+#endif
+ old_fops = filp->f_op;
+ filp->f_op = &i830_buffer_fops;
+ dev_priv->mmap_buffer = buf;
+ buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total,
+ PROT_READ|PROT_WRITE,
+ MAP_SHARED,
+ buf->bus_address);
+ dev_priv->mmap_buffer = NULL;
+ filp->f_op = old_fops;
+ if ((unsigned long)buf_priv->virtual > -1024UL) {
+ /* Real error */
+ DRM_DEBUG("mmap error\n");
+ retcode = (signed int)buf_priv->virtual;
+ buf_priv->virtual = 0;
+ }
+#if LINUX_VERSION_CODE <= 0x020402
+ up( &current->mm->mmap_sem );
+#else
+ up_write( &current->mm->mmap_sem );
+#endif
+ } else {
+ buf_priv->virtual = buf_priv->kernel_virtual;
+ buf_priv->currently_mapped = I830_BUF_MAPPED;
+ }
+ return retcode;
+}
+
+static int i830_unmap_buffer(drm_buf_t *buf)
+{
+ drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+ int retcode = 0;
+
+ if(VM_DONTCOPY != 0) {
+ if(buf_priv->currently_mapped != I830_BUF_MAPPED)
+ return -EINVAL;
+#if LINUX_VERSION_CODE <= 0x020402
+ down( &current->mm->mmap_sem );
+#else
+ down_write( &current->mm->mmap_sem );
+#endif
+#if LINUX_VERSION_CODE < 0x020399
+ retcode = do_munmap((unsigned long)buf_priv->virtual,
+ (size_t) buf->total);
+#else
+ retcode = do_munmap(current->mm,
+ (unsigned long)buf_priv->virtual,
+ (size_t) buf->total);
+#endif
+#if LINUX_VERSION_CODE <= 0x020402
+ up( &current->mm->mmap_sem );
+#else
+ up_write( &current->mm->mmap_sem );
+#endif
+ }
+ buf_priv->currently_mapped = I830_BUF_UNMAPPED;
+ buf_priv->virtual = 0;
+
+ return retcode;
+}
+
+static int i830_dma_get_buffer(drm_device_t *dev, drm_i830_dma_t *d,
+ struct file *filp)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_buf_t *buf;
+ drm_i830_buf_priv_t *buf_priv;
+ int retcode = 0;
+
+ buf = i830_freelist_get(dev);
+ if (!buf) {
+ retcode = -ENOMEM;
+ DRM_DEBUG("retcode=%d\n", retcode);
+ return retcode;
+ }
+
+ retcode = i830_map_buffer(buf, filp);
+ if(retcode) {
+ i830_freelist_put(dev, buf);
+ DRM_DEBUG("mapbuf failed, retcode %d\n", retcode);
+ return retcode;
+ }
+ buf->pid = priv->pid;
+ buf_priv = buf->dev_private;
+ d->granted = 1;
+ d->request_idx = buf->idx;
+ d->request_size = buf->total;
+ d->virtual = buf_priv->virtual;
+
+ return retcode;
+}
+
+static unsigned long i830_alloc_page(drm_device_t *dev)
+{
+ unsigned long address;
+
+ address = __get_free_page(GFP_KERNEL);
+ if(address == 0UL)
+ return 0;
+
+ atomic_inc(&virt_to_page(address)->count);
+ set_bit(PG_locked, &virt_to_page(address)->flags);
+
+ return address;
+}
+
+static void i830_free_page(drm_device_t *dev, unsigned long page)
+{
+ if(page == 0UL)
+ return;
+
+ atomic_dec(&virt_to_page(page)->count);
+ clear_bit(PG_locked, &virt_to_page(page)->flags);
+ wake_up(&virt_to_page(page)->wait);
+ free_page(page);
+ return;
+}
+
+static int i830_dma_cleanup(drm_device_t *dev)
+{
+ drm_device_dma_t *dma = dev->dma;
+
+ if(dev->dev_private) {
+ int i;
+ drm_i830_private_t *dev_priv =
+ (drm_i830_private_t *) dev->dev_private;
+
+ if(dev_priv->ring.virtual_start) {
+ DRM(ioremapfree)((void *) dev_priv->ring.virtual_start,
+ dev_priv->ring.Size);
+ }
+ if(dev_priv->hw_status_page != 0UL) {
+ i830_free_page(dev, dev_priv->hw_status_page);
+ /* Need to rewrite hardware status page */
+ I830_WRITE(0x02080, 0x1ffff000);
+ }
+ DRM(free)(dev->dev_private, sizeof(drm_i830_private_t),
+ DRM_MEM_DRIVER);
+ dev->dev_private = NULL;
+
+ for (i = 0; i < dma->buf_count; i++) {
+ drm_buf_t *buf = dma->buflist[ i ];
+ drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+ DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total);
+ }
+ }
+ return 0;
+}
+
+static int i830_wait_ring(drm_device_t *dev, int n)
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
+ int iters = 0;
+ unsigned long end;
+ unsigned int last_head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+
+ end = jiffies + (HZ*3);
+ while (ring->space < n) {
+ int i;
+
+ ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+ ring->space = ring->head - (ring->tail+8);
+ if (ring->space < 0) ring->space += ring->Size;
+
+ if (ring->head != last_head) {
+ end = jiffies + (HZ*3);
+ last_head = ring->head;
+ }
+
+ iters++;
+ if((signed)(end - jiffies) <= 0) {
+ DRM_ERROR("space: %d wanted %d\n", ring->space, n);
+ DRM_ERROR("lockup\n");
+ goto out_wait_ring;
+ }
+
+ for (i = 0 ; i < 2000 ; i++) ;
+ }
+
+out_wait_ring:
+ return iters;
+}
+
+static void i830_kernel_lost_context(drm_device_t *dev)
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
+
+ ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+ ring->tail = I830_READ(LP_RING + RING_TAIL);
+ ring->space = ring->head - (ring->tail+8);
+ if (ring->space < 0) ring->space += ring->Size;
+}
+
+static int i830_freelist_init(drm_device_t *dev, drm_i830_private_t *dev_priv)
+{
+ drm_device_dma_t *dma = dev->dma;
+ int my_idx = 36;
+ u32 *hw_status = (u32 *)(dev_priv->hw_status_page + my_idx);
+ int i;
+
+ if(dma->buf_count > 1019) {
+ /* Not enough space in the status page for the freelist */
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dma->buf_count; i++) {
+ drm_buf_t *buf = dma->buflist[ i ];
+ drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+
+ buf_priv->in_use = hw_status++;
+ buf_priv->my_use_idx = my_idx;
+ my_idx += 4;
+
+ *buf_priv->in_use = I830_BUF_FREE;
+
+ buf_priv->kernel_virtual = DRM(ioremap)(buf->bus_address,
+ buf->total);
+ }
+ return 0;
+}
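
/* Editorial sketch, not part of the patch: the status-page layout the
 * freelist relies on.  Dwords 0-5 of the 4 KiB hardware status page carry
 * interrupt status, ring head pointers and the driver counter; per-buffer
 * in_use words start at byte offset 36 (dword 9), one dword per buffer,
 * which is also what i830_print_status_page() reads back.
 */
static inline u32 *i830_in_use_slot(unsigned long hw_status_page, int idx)
{
	return (u32 *)(hw_status_page + 36) + idx;
}
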
+
+static int i830_dma_initialize(drm_device_t *dev,
+ drm_i830_private_t *dev_priv,
+ drm_i830_init_t *init)
+{
+ struct list_head *list;
+
+ memset(dev_priv, 0, sizeof(drm_i830_private_t));
+
+ list_for_each(list, &dev->maplist->head) {
+ drm_map_list_t *r_list = (drm_map_list_t *)list;
+ if( r_list->map &&
+ r_list->map->type == _DRM_SHM &&
+ r_list->map->flags & _DRM_CONTAINS_LOCK ) {
+ dev_priv->sarea_map = r_list->map;
+ break;
+ }
+ }
+
+ if(!dev_priv->sarea_map) {
+ dev->dev_private = (void *)dev_priv;
+ i830_dma_cleanup(dev);
+ DRM_ERROR("can not find sarea!\n");
+ return -EINVAL;
+ }
+ DRM_FIND_MAP( dev_priv->mmio_map, init->mmio_offset );
+ if(!dev_priv->mmio_map) {
+ dev->dev_private = (void *)dev_priv;
+ i830_dma_cleanup(dev);
+ DRM_ERROR("can not find mmio map!\n");
+ return -EINVAL;
+ }
+ DRM_FIND_MAP( dev_priv->buffer_map, init->buffers_offset );
+ if(!dev_priv->buffer_map) {
+ dev->dev_private = (void *)dev_priv;
+ i830_dma_cleanup(dev);
+ DRM_ERROR("can not find dma buffer map!\n");
+ return -EINVAL;
+ }
+
+ dev_priv->sarea_priv = (drm_i830_sarea_t *)
+ ((u8 *)dev_priv->sarea_map->handle +
+ init->sarea_priv_offset);
+
+ atomic_set(&dev_priv->flush_done, 0);
+ init_waitqueue_head(&dev_priv->flush_queue);
+
+ dev_priv->ring.Start = init->ring_start;
+ dev_priv->ring.End = init->ring_end;
+ dev_priv->ring.Size = init->ring_size;
+
+ dev_priv->ring.virtual_start = DRM(ioremap)(dev->agp->base +
+ init->ring_start,
+ init->ring_size);
+
+ if (dev_priv->ring.virtual_start == NULL) {
+ dev->dev_private = (void *) dev_priv;
+ i830_dma_cleanup(dev);
+ DRM_ERROR("can not ioremap virtual address for"
+ " ring buffer\n");
+ return -ENOMEM;
+ }
+
+ dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
+
+ dev_priv->w = init->w;
+ dev_priv->h = init->h;
+ dev_priv->pitch = init->pitch;
+ dev_priv->back_offset = init->back_offset;
+ dev_priv->depth_offset = init->depth_offset;
+
+ dev_priv->front_di1 = init->front_offset | init->pitch_bits;
+ dev_priv->back_di1 = init->back_offset | init->pitch_bits;
+ dev_priv->zi1 = init->depth_offset | init->pitch_bits;
+
+ dev_priv->cpp = init->cpp;
+	/* We are using separate values as placeholders for future private
+	 * backbuffer/depthbuffer usage.
+	 */
+
+ dev_priv->back_pitch = init->back_pitch;
+ dev_priv->depth_pitch = init->depth_pitch;
+
+ /* Program Hardware Status Page */
+ dev_priv->hw_status_page = i830_alloc_page(dev);
+ if(dev_priv->hw_status_page == 0UL) {
+ dev->dev_private = (void *)dev_priv;
+ i830_dma_cleanup(dev);
+ DRM_ERROR("Can not allocate hardware status page\n");
+ return -ENOMEM;
+ }
+ memset((void *) dev_priv->hw_status_page, 0, PAGE_SIZE);
+ DRM_DEBUG("hw status page @ %lx\n", dev_priv->hw_status_page);
+
+ I830_WRITE(0x02080, virt_to_bus((void *)dev_priv->hw_status_page));
+ DRM_DEBUG("Enabled hardware status page\n");
+
+ /* Now we need to init our freelist */
+ if(i830_freelist_init(dev, dev_priv) != 0) {
+ dev->dev_private = (void *)dev_priv;
+ i830_dma_cleanup(dev);
+ DRM_ERROR("Not enough space in the status page for"
+ " the freelist\n");
+ return -ENOMEM;
+ }
+ dev->dev_private = (void *)dev_priv;
+
+ return 0;
+}
+
+int i830_dma_init(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_i830_private_t *dev_priv;
+ drm_i830_init_t init;
+ int retcode = 0;
+
+ if (copy_from_user(&init, (drm_i830_init_t *)arg, sizeof(init)))
+ return -EFAULT;
+
+ switch(init.func) {
+ case I830_INIT_DMA:
+ dev_priv = DRM(alloc)(sizeof(drm_i830_private_t),
+ DRM_MEM_DRIVER);
+ if(dev_priv == NULL) return -ENOMEM;
+ retcode = i830_dma_initialize(dev, dev_priv, &init);
+ break;
+ case I830_CLEANUP_DMA:
+ retcode = i830_dma_cleanup(dev);
+ break;
+ default:
+ retcode = -EINVAL;
+ break;
+ }
+
+ return retcode;
+}
+
+/* Most efficient way to verify state for the i830 is as it is
+ * emitted. Non-conformant state is silently dropped.
+ *
+ * Use 'volatile' & local var tmp to force the emitted values to be
+ * identical to the verified ones.
+ */
+static void i830EmitContextVerified( drm_device_t *dev,
+ volatile unsigned int *code )
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ int i, j = 0;
+ unsigned int tmp;
+ RING_LOCALS;
+
+ BEGIN_LP_RING( I830_CTX_SETUP_SIZE );
+ for ( i = 0 ; i < I830_CTX_SETUP_SIZE ; i++ ) {
+ tmp = code[i];
+
+#if 0
+ if ((tmp & (7<<29)) == (3<<29) &&
+ (tmp & (0x1f<<24)) < (0x1d<<24)) {
+ OUT_RING( tmp );
+ j++;
+ } else {
+ printk("Skipping %d\n", i);
+ }
+#else
+ OUT_RING( tmp );
+ j++;
+#endif
+ }
+
+ if (j & 1)
+ OUT_RING( 0 );
+
+ ADVANCE_LP_RING();
+}
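
/* Editorial sketch, not part of the patch: the shape of the per-dword
 * check that the #if 0 block in i830EmitContextVerified above would
 * apply -- accept only 3D state packets (bits 31:29 == 0x3) whose opcode
 * field stays below 0x1d, and drop anything else.
 */
static inline int i830_state_dword_ok(unsigned int dw)
{
	return (dw & (7 << 29)) == (3 << 29) &&
	       (dw & (0x1f << 24)) < (0x1d << 24);
}
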
+
+static void i830EmitTexVerified( drm_device_t *dev,
+ volatile unsigned int *code )
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ int i, j = 0;
+ unsigned int tmp;
+ RING_LOCALS;
+
+ BEGIN_LP_RING( I830_TEX_SETUP_SIZE );
+
+ OUT_RING( GFX_OP_MAP_INFO );
+ OUT_RING( code[I830_TEXREG_MI1] );
+ OUT_RING( code[I830_TEXREG_MI2] );
+ OUT_RING( code[I830_TEXREG_MI3] );
+ OUT_RING( code[I830_TEXREG_MI4] );
+ OUT_RING( code[I830_TEXREG_MI5] );
+
+ for ( i = 6 ; i < I830_TEX_SETUP_SIZE ; i++ ) {
+ tmp = code[i];
+ OUT_RING( tmp );
+ j++;
+ }
+
+ if (j & 1)
+ OUT_RING( 0 );
+
+ ADVANCE_LP_RING();
+}
+
+static void i830EmitTexBlendVerified( drm_device_t *dev,
+ volatile unsigned int *code,
+ volatile unsigned int num)
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ int i, j = 0;
+ unsigned int tmp;
+ RING_LOCALS;
+
+ BEGIN_LP_RING( num );
+
+ for ( i = 0 ; i < num ; i++ ) {
+ tmp = code[i];
+ OUT_RING( tmp );
+ j++;
+ }
+
+ if (j & 1)
+ OUT_RING( 0 );
+
+ ADVANCE_LP_RING();
+}
+
+static void i830EmitTexPalette( drm_device_t *dev,
+ unsigned int *palette,
+ int number,
+ int is_shared )
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ int i;
+ RING_LOCALS;
+
+ BEGIN_LP_RING( 258 );
+
+ if(is_shared == 1) {
+ OUT_RING(CMD_OP_MAP_PALETTE_LOAD |
+ MAP_PALETTE_NUM(0) |
+ MAP_PALETTE_BOTH);
+ } else {
+ OUT_RING(CMD_OP_MAP_PALETTE_LOAD | MAP_PALETTE_NUM(number));
+ }
+ for(i = 0; i < 256; i++) {
+ OUT_RING(palette[i]);
+ }
+ OUT_RING(0);
+}
+
+/* Need to do some additional checking when setting the dest buffer.
+ */
+static void i830EmitDestVerified( drm_device_t *dev,
+ volatile unsigned int *code )
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ unsigned int tmp;
+ RING_LOCALS;
+
+ BEGIN_LP_RING( I830_DEST_SETUP_SIZE + 6 );
+
+ tmp = code[I830_DESTREG_CBUFADDR];
+ if (tmp == dev_priv->front_di1) {
+		/* Don't use a fence when rendering to the front buffer */
+ OUT_RING( CMD_OP_DESTBUFFER_INFO );
+ OUT_RING( BUF_3D_ID_COLOR_BACK |
+ BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) );
+ OUT_RING( tmp );
+
+ OUT_RING( CMD_OP_DESTBUFFER_INFO );
+ OUT_RING( BUF_3D_ID_DEPTH |
+ BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp));
+ OUT_RING( dev_priv->zi1 );
+ } else if(tmp == dev_priv->back_di1) {
+ OUT_RING( CMD_OP_DESTBUFFER_INFO );
+ OUT_RING( BUF_3D_ID_COLOR_BACK |
+ BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) |
+ BUF_3D_USE_FENCE);
+ OUT_RING( tmp );
+
+ OUT_RING( CMD_OP_DESTBUFFER_INFO );
+ OUT_RING( BUF_3D_ID_DEPTH | BUF_3D_USE_FENCE |
+ BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp));
+ OUT_RING( dev_priv->zi1 );
+ } else {
+ DRM_DEBUG("bad di1 %x (allow %x or %x)\n",
+ tmp, dev_priv->front_di1, dev_priv->back_di1);
+ }
+
+	/* Invariant state: */
+
+
+ OUT_RING( GFX_OP_DESTBUFFER_VARS );
+ OUT_RING( code[I830_DESTREG_DV1] );
+
+ OUT_RING( GFX_OP_DRAWRECT_INFO );
+ OUT_RING( code[I830_DESTREG_DR1] );
+ OUT_RING( code[I830_DESTREG_DR2] );
+ OUT_RING( code[I830_DESTREG_DR3] );
+ OUT_RING( code[I830_DESTREG_DR4] );
+
+ /* Need to verify this */
+ tmp = code[I830_DESTREG_SENABLE];
+ if((tmp & ~0x3) == GFX_OP_SCISSOR_ENABLE) {
+ OUT_RING( tmp );
+ } else {
+ DRM_DEBUG("bad scissor enable\n");
+ OUT_RING( 0 );
+ }
+
+ OUT_RING( code[I830_DESTREG_SENABLE] );
+
+ OUT_RING( GFX_OP_SCISSOR_RECT );
+ OUT_RING( code[I830_DESTREG_SR1] );
+ OUT_RING( code[I830_DESTREG_SR2] );
+
+ ADVANCE_LP_RING();
+}
+
+static void i830EmitState( drm_device_t *dev )
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned int dirty = sarea_priv->dirty;
+
+ if (dirty & I830_UPLOAD_BUFFERS) {
+ i830EmitDestVerified( dev, sarea_priv->BufferState );
+ sarea_priv->dirty &= ~I830_UPLOAD_BUFFERS;
+ }
+
+ if (dirty & I830_UPLOAD_CTX) {
+ i830EmitContextVerified( dev, sarea_priv->ContextState );
+ sarea_priv->dirty &= ~I830_UPLOAD_CTX;
+ }
+
+ if (dirty & I830_UPLOAD_TEX0) {
+ i830EmitTexVerified( dev, sarea_priv->TexState[0] );
+ sarea_priv->dirty &= ~I830_UPLOAD_TEX0;
+ }
+
+ if (dirty & I830_UPLOAD_TEX1) {
+ i830EmitTexVerified( dev, sarea_priv->TexState[1] );
+ sarea_priv->dirty &= ~I830_UPLOAD_TEX1;
+ }
+
+ if (dirty & I830_UPLOAD_TEXBLEND0) {
+ i830EmitTexBlendVerified( dev, sarea_priv->TexBlendState[0],
+ sarea_priv->TexBlendStateWordsUsed[0]);
+ sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND0;
+ }
+
+ if (dirty & I830_UPLOAD_TEXBLEND1) {
+ i830EmitTexBlendVerified( dev, sarea_priv->TexBlendState[1],
+ sarea_priv->TexBlendStateWordsUsed[1]);
+ sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND1;
+ }
+
+ if (dirty & I830_UPLOAD_TEX_PALETTE_SHARED) {
+ i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1);
+ } else {
+ if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) {
+ i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0);
+ sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0);
+ }
+ if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) {
+ i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0);
+ sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1);
+ }
+ }
+}
+
+static void i830_dma_dispatch_clear( drm_device_t *dev, int flags,
+ unsigned int clear_color,
+ unsigned int clear_zval,
+ unsigned int clear_depthmask)
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ int nbox = sarea_priv->nbox;
+ drm_clip_rect_t *pbox = sarea_priv->boxes;
+ int pitch = dev_priv->pitch;
+ int cpp = dev_priv->cpp;
+ int i;
+ unsigned int BR13, CMD, D_CMD;
+ RING_LOCALS;
+
+ i830_kernel_lost_context(dev);
+
+ switch(cpp) {
+ case 2:
+ BR13 = (0xF0 << 16) | (pitch * cpp) | (1<<24);
+ D_CMD = CMD = XY_COLOR_BLT_CMD;
+ break;
+ case 4:
+ BR13 = (0xF0 << 16) | (pitch * cpp) | (1<<24) | (1<<25);
+ CMD = (XY_COLOR_BLT_CMD | XY_COLOR_BLT_WRITE_ALPHA |
+ XY_COLOR_BLT_WRITE_RGB);
+ D_CMD = XY_COLOR_BLT_CMD;
+ if(clear_depthmask & 0x00ffffff)
+ D_CMD |= XY_COLOR_BLT_WRITE_RGB;
+ if(clear_depthmask & 0xff000000)
+ D_CMD |= XY_COLOR_BLT_WRITE_ALPHA;
+ break;
+ default:
+ BR13 = (0xF0 << 16) | (pitch * cpp) | (1<<24);
+ D_CMD = CMD = XY_COLOR_BLT_CMD;
+ break;
+ }
+
+ if (nbox > I830_NR_SAREA_CLIPRECTS)
+ nbox = I830_NR_SAREA_CLIPRECTS;
+
+ for (i = 0 ; i < nbox ; i++, pbox++) {
+ if (pbox->x1 > pbox->x2 ||
+ pbox->y1 > pbox->y2 ||
+ pbox->x2 > dev_priv->w ||
+ pbox->y2 > dev_priv->h)
+ continue;
+
+ if ( flags & I830_FRONT ) {
+ DRM_DEBUG("clear front\n");
+ BEGIN_LP_RING( 6 );
+ OUT_RING( CMD );
+ OUT_RING( BR13 );
+ OUT_RING( (pbox->y1 << 16) | pbox->x1 );
+ OUT_RING( (pbox->y2 << 16) | pbox->x2 );
+ OUT_RING( 0 );
+ OUT_RING( clear_color );
+ ADVANCE_LP_RING();
+ }
+
+ if ( flags & I830_BACK ) {
+ DRM_DEBUG("clear back\n");
+ BEGIN_LP_RING( 6 );
+ OUT_RING( CMD );
+ OUT_RING( BR13 );
+ OUT_RING( (pbox->y1 << 16) | pbox->x1 );
+ OUT_RING( (pbox->y2 << 16) | pbox->x2 );
+ OUT_RING( dev_priv->back_offset );
+ OUT_RING( clear_color );
+ ADVANCE_LP_RING();
+ }
+
+ if ( flags & I830_DEPTH ) {
+ DRM_DEBUG("clear depth\n");
+ BEGIN_LP_RING( 6 );
+ OUT_RING( D_CMD );
+ OUT_RING( BR13 );
+ OUT_RING( (pbox->y1 << 16) | pbox->x1 );
+ OUT_RING( (pbox->y2 << 16) | pbox->x2 );
+ OUT_RING( dev_priv->depth_offset );
+ OUT_RING( clear_zval );
+ ADVANCE_LP_RING();
+ }
+ }
+}
+
+static void i830_dma_dispatch_swap( drm_device_t *dev )
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ int nbox = sarea_priv->nbox;
+ drm_clip_rect_t *pbox = sarea_priv->boxes;
+ int pitch = dev_priv->pitch;
+ int cpp = dev_priv->cpp;
+ int ofs = dev_priv->back_offset;
+ int i;
+ unsigned int CMD, BR13;
+ RING_LOCALS;
+
+ DRM_DEBUG("swapbuffers\n");
+
+ switch(cpp) {
+ case 2:
+ BR13 = (pitch * cpp) | (0xCC << 16) | (1<<24);
+ CMD = XY_SRC_COPY_BLT_CMD;
+ break;
+ case 4:
+ BR13 = (pitch * cpp) | (0xCC << 16) | (1<<24) | (1<<25);
+ CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
+ XY_SRC_COPY_BLT_WRITE_RGB);
+ break;
+ default:
+ BR13 = (pitch * cpp) | (0xCC << 16) | (1<<24);
+ CMD = XY_SRC_COPY_BLT_CMD;
+ break;
+ }
+
+ i830_kernel_lost_context(dev);
+
+ if (nbox > I830_NR_SAREA_CLIPRECTS)
+ nbox = I830_NR_SAREA_CLIPRECTS;
+
+ for (i = 0 ; i < nbox; i++, pbox++)
+ {
+ if (pbox->x1 > pbox->x2 ||
+ pbox->y1 > pbox->y2 ||
+ pbox->x2 > dev_priv->w ||
+ pbox->y2 > dev_priv->h)
+ continue;
+
+ DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n",
+ pbox->x1, pbox->y1,
+ pbox->x2, pbox->y2);
+
+ BEGIN_LP_RING( 8 );
+ OUT_RING( CMD );
+ OUT_RING( BR13 );
+
+ OUT_RING( (pbox->y1 << 16) |
+ pbox->x1 );
+ OUT_RING( (pbox->y2 << 16) |
+ pbox->x2 );
+
+ OUT_RING( 0 /* front ofs always zero */ );
+ OUT_RING( (pbox->y1 << 16) |
+ pbox->x1 );
+
+ OUT_RING( BR13 & 0xffff );
+ OUT_RING( ofs );
+
+ ADVANCE_LP_RING();
+ }
+}
+
+
+static void i830_dma_dispatch_vertex(drm_device_t *dev,
+ drm_buf_t *buf,
+ int discard,
+ int used)
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+ drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_clip_rect_t *box = sarea_priv->boxes;
+ int nbox = sarea_priv->nbox;
+ unsigned long address = (unsigned long)buf->bus_address;
+ unsigned long start = address - dev->agp->base;
+ int i = 0, u;
+ RING_LOCALS;
+
+ i830_kernel_lost_context(dev);
+
+ if (nbox > I830_NR_SAREA_CLIPRECTS)
+ nbox = I830_NR_SAREA_CLIPRECTS;
+
+ if (discard) {
+ u = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
+ I830_BUF_HARDWARE);
+ if(u != I830_BUF_CLIENT) {
+ DRM_DEBUG("xxxx 2\n");
+ }
+ }
+
+ if (used > 4*1024)
+ used = 0;
+
+ if (sarea_priv->dirty)
+ i830EmitState( dev );
+
+ DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n",
+ address, used, nbox);
+
+ dev_priv->counter++;
+ DRM_DEBUG( "dispatch counter : %ld\n", dev_priv->counter);
+ DRM_DEBUG( "i830_dma_dispatch\n");
+ DRM_DEBUG( "start : %lx\n", start);
+ DRM_DEBUG( "used : %d\n", used);
+ DRM_DEBUG( "start + used - 4 : %ld\n", start + used - 4);
+
+ if (buf_priv->currently_mapped == I830_BUF_MAPPED) {
+ *(u32 *)buf_priv->virtual = (GFX_OP_PRIMITIVE |
+ sarea_priv->vertex_prim |
+ ((used/4)-2));
+
+ if (used & 4) {
+ *(u32 *)((u32)buf_priv->virtual + used) = 0;
+ used += 4;
+ }
+
+ i830_unmap_buffer(buf);
+ }
+
+ if (used) {
+ do {
+ if (i < nbox) {
+ BEGIN_LP_RING(6);
+ OUT_RING( GFX_OP_DRAWRECT_INFO );
+ OUT_RING( sarea_priv->BufferState[I830_DESTREG_DR1] );
+ OUT_RING( box[i].x1 | (box[i].y1<<16) );
+ OUT_RING( box[i].x2 | (box[i].y2<<16) );
+ OUT_RING( sarea_priv->BufferState[I830_DESTREG_DR4] );
+ OUT_RING( 0 );
+ ADVANCE_LP_RING();
+ }
+
+ BEGIN_LP_RING(4);
+
+ OUT_RING( MI_BATCH_BUFFER );
+ OUT_RING( start | MI_BATCH_NON_SECURE );
+ OUT_RING( start + used - 4 );
+ OUT_RING( 0 );
+ ADVANCE_LP_RING();
+
+ } while (++i < nbox);
+ }
+
+ BEGIN_LP_RING(10);
+ OUT_RING( CMD_STORE_DWORD_IDX );
+ OUT_RING( 20 );
+ OUT_RING( dev_priv->counter );
+ OUT_RING( 0 );
+
+ if (discard) {
+ OUT_RING( CMD_STORE_DWORD_IDX );
+ OUT_RING( buf_priv->my_use_idx );
+ OUT_RING( I830_BUF_FREE );
+ OUT_RING( 0 );
+ }
+
+ OUT_RING( CMD_REPORT_HEAD );
+ OUT_RING( 0 );
+ ADVANCE_LP_RING();
+}
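
/* Editorial sketch, not part of the patch: the header fixup applied above
 * when the buffer is still mapped.  The first dword of the client's vertex
 * buffer is rewritten as a PRIMITIVE command whose inline length field is
 * (used / 4) - 2, and one zero dword of padding is appended when "used" is
 * not quadword aligned, so the batch stays 8-byte aligned.
 */
static inline int i830_fixup_vertex_header(u32 *virt, int used, u32 prim)
{
	virt[0] = GFX_OP_PRIMITIVE | prim | ((used / 4) - 2);
	if (used & 4) {			/* pad to an 8-byte boundary */
		virt[used / 4] = 0;
		used += 4;
	}
	return used;
}
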
+
+/* Interrupts are only for flushing */
+void i830_dma_service(int irq, void *device, struct pt_regs *regs)
+{
+ drm_device_t *dev = (drm_device_t *)device;
+ drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
+ u16 temp;
+
+ temp = I830_READ16(I830REG_INT_IDENTITY_R);
+ temp = temp & ~(0x6000);
+ if(temp != 0) I830_WRITE16(I830REG_INT_IDENTITY_R,
+ temp); /* Clear all interrupts */
+ else
+ return;
+
+ queue_task(&dev->tq, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+}
+
+void DRM(dma_immediate_bh)(void *device)
+{
+ drm_device_t *dev = (drm_device_t *) device;
+ drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
+
+ atomic_set(&dev_priv->flush_done, 1);
+ wake_up_interruptible(&dev_priv->flush_queue);
+}
+
+static inline void i830_dma_emit_flush(drm_device_t *dev)
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ RING_LOCALS;
+
+ i830_kernel_lost_context(dev);
+
+ BEGIN_LP_RING(2);
+ OUT_RING( CMD_REPORT_HEAD );
+ OUT_RING( GFX_OP_USER_INTERRUPT );
+ ADVANCE_LP_RING();
+
+ i830_wait_ring( dev, dev_priv->ring.Size - 8 );
+ atomic_set(&dev_priv->flush_done, 1);
+ wake_up_interruptible(&dev_priv->flush_queue);
+}
+
+static inline void i830_dma_quiescent_emit(drm_device_t *dev)
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ RING_LOCALS;
+
+ i830_kernel_lost_context(dev);
+
+ BEGIN_LP_RING(4);
+ OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE );
+ OUT_RING( CMD_REPORT_HEAD );
+ OUT_RING( 0 );
+ OUT_RING( GFX_OP_USER_INTERRUPT );
+ ADVANCE_LP_RING();
+
+ i830_wait_ring( dev, dev_priv->ring.Size - 8 );
+ atomic_set(&dev_priv->flush_done, 1);
+ wake_up_interruptible(&dev_priv->flush_queue);
+}
+
+void i830_dma_quiescent(drm_device_t *dev)
+{
+ DECLARE_WAITQUEUE(entry, current);
+ drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
+ unsigned long end;
+
+ if(dev_priv == NULL) {
+ return;
+ }
+ atomic_set(&dev_priv->flush_done, 0);
+ add_wait_queue(&dev_priv->flush_queue, &entry);
+ end = jiffies + (HZ*3);
+
+ for (;;) {
+ current->state = TASK_INTERRUPTIBLE;
+ i830_dma_quiescent_emit(dev);
+ if (atomic_read(&dev_priv->flush_done) == 1) break;
+ if((signed)(end - jiffies) <= 0) {
+ DRM_ERROR("lockup\n");
+ break;
+ }
+ schedule_timeout(HZ*3);
+ if (signal_pending(current)) {
+ break;
+ }
+ }
+
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&dev_priv->flush_queue, &entry);
+
+ return;
+}
+
+static int i830_flush_queue(drm_device_t *dev)
+{
+ DECLARE_WAITQUEUE(entry, current);
+ drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+ unsigned long end;
+ int i, ret = 0;
+
+ if(dev_priv == NULL) {
+ return 0;
+ }
+ atomic_set(&dev_priv->flush_done, 0);
+ add_wait_queue(&dev_priv->flush_queue, &entry);
+ end = jiffies + (HZ*3);
+ for (;;) {
+ current->state = TASK_INTERRUPTIBLE;
+ i830_dma_emit_flush(dev);
+ if (atomic_read(&dev_priv->flush_done) == 1) break;
+ if((signed)(end - jiffies) <= 0) {
+ DRM_ERROR("lockup\n");
+ break;
+ }
+ schedule_timeout(HZ*3);
+ if (signal_pending(current)) {
+ ret = -EINTR; /* Can't restart */
+ break;
+ }
+ }
+
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&dev_priv->flush_queue, &entry);
+
+
+ for (i = 0; i < dma->buf_count; i++) {
+ drm_buf_t *buf = dma->buflist[ i ];
+ drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+
+ int used = cmpxchg(buf_priv->in_use, I830_BUF_HARDWARE,
+ I830_BUF_FREE);
+
+ if (used == I830_BUF_HARDWARE)
+ DRM_DEBUG("reclaimed from HARDWARE\n");
+ if (used == I830_BUF_CLIENT)
+ DRM_DEBUG("still on client HARDWARE\n");
+ }
+
+ return ret;
+}
+
+/* Must be called with the lock held */
+void i830_reclaim_buffers(drm_device_t *dev, pid_t pid)
+{
+ drm_device_dma_t *dma = dev->dma;
+ int i;
+
+ if (!dma) return;
+ if (!dev->dev_private) return;
+ if (!dma->buflist) return;
+
+ i830_flush_queue(dev);
+
+ for (i = 0; i < dma->buf_count; i++) {
+ drm_buf_t *buf = dma->buflist[ i ];
+ drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+
+ if (buf->pid == pid && buf_priv) {
+ int used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
+ I830_BUF_FREE);
+
+ if (used == I830_BUF_CLIENT)
+ DRM_DEBUG("reclaimed from client\n");
+ if(buf_priv->currently_mapped == I830_BUF_MAPPED)
+ buf_priv->currently_mapped = I830_BUF_UNMAPPED;
+ }
+ }
+}
+
+int i830_flush_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+
+ DRM_DEBUG("i830_flush_ioctl\n");
+ if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("i830_flush_ioctl called without lock held\n");
+ return -EINVAL;
+ }
+
+ i830_flush_queue(dev);
+ return 0;
+}
+
+int i830_dma_vertex(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
+ u32 *hw_status = (u32 *)dev_priv->hw_status_page;
+ drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
+ dev_priv->sarea_priv;
+ drm_i830_vertex_t vertex;
+
+ if (copy_from_user(&vertex, (drm_i830_vertex_t *)arg, sizeof(vertex)))
+ return -EFAULT;
+
+ if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("i830_dma_vertex called without lock held\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("i830 dma vertex, idx %d used %d discard %d\n",
+ vertex.idx, vertex.used, vertex.discard);
+
+ if(vertex.idx < 0 || vertex.idx > dma->buf_count) return -EINVAL;
+
+ i830_dma_dispatch_vertex( dev,
+ dma->buflist[ vertex.idx ],
+ vertex.discard, vertex.used );
+
+ sarea_priv->last_enqueue = dev_priv->counter-1;
+ sarea_priv->last_dispatch = (int) hw_status[5];
+
+ return 0;
+}
+
+int i830_clear_bufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_i830_clear_t clear;
+
+ if (copy_from_user(&clear, (drm_i830_clear_t *)arg, sizeof(clear)))
+ return -EFAULT;
+
+ if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("i830_clear_bufs called without lock held\n");
+ return -EINVAL;
+ }
+
+ /* GH: Someone's doing nasty things... */
+ if (!dev->dev_private) {
+ return -EINVAL;
+ }
+
+ i830_dma_dispatch_clear( dev, clear.flags,
+ clear.clear_color,
+ clear.clear_depth,
+ clear.clear_depthmask);
+ return 0;
+}
+
+int i830_swap_bufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+
+ DRM_DEBUG("i830_swap_bufs\n");
+
+ if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+		DRM_ERROR("i830_swap_bufs called without lock held\n");
+ return -EINVAL;
+ }
+
+ i830_dma_dispatch_swap( dev );
+ return 0;
+}
+
+int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
+ u32 *hw_status = (u32 *)dev_priv->hw_status_page;
+ drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
+ dev_priv->sarea_priv;
+
+ sarea_priv->last_dispatch = (int) hw_status[5];
+ return 0;
+}
+
+int i830_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ int retcode = 0;
+ drm_i830_dma_t d;
+ drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
+ u32 *hw_status = (u32 *)dev_priv->hw_status_page;
+ drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
+ dev_priv->sarea_priv;
+
+ DRM_DEBUG("getbuf\n");
+ if (copy_from_user(&d, (drm_i830_dma_t *)arg, sizeof(d)))
+ return -EFAULT;
+
+ if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("i830_dma called without lock held\n");
+ return -EINVAL;
+ }
+
+ d.granted = 0;
+
+ retcode = i830_dma_get_buffer(dev, &d, filp);
+
+ DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n",
+ current->pid, retcode, d.granted);
+
+ if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d)))
+ return -EFAULT;
+ sarea_priv->last_dispatch = (int) hw_status[5];
+
+ return retcode;
+}
+
+int i830_copybuf(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_i830_copy_t d;
+ drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
+ u32 *hw_status = (u32 *)dev_priv->hw_status_page;
+ drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
+ dev_priv->sarea_priv;
+ drm_buf_t *buf;
+ drm_i830_buf_priv_t *buf_priv;
+ drm_device_dma_t *dma = dev->dma;
+
+ if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("i830_dma called without lock held\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&d, (drm_i830_copy_t *)arg, sizeof(d)))
+ return -EFAULT;
+
+ if(d.idx < 0 || d.idx > dma->buf_count) return -EINVAL;
+ buf = dma->buflist[ d.idx ];
+ buf_priv = buf->dev_private;
+ if (buf_priv->currently_mapped != I830_BUF_MAPPED) return -EPERM;
+
+ if(d.used < 0 || d.used > buf->total) return -EINVAL;
+
+ if (copy_from_user(buf_priv->virtual, d.address, d.used))
+ return -EFAULT;
+
+ sarea_priv->last_dispatch = (int) hw_status[5];
+
+ return 0;
+}
+
+int i830_docopy(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ if(VM_DONTCOPY == 0) return 1;
+ return 0;
+}
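
The ring handling above is the same head/tail scheme as the i810 driver: BEGIN_LP_RING waits until the gap between the hardware read pointer (head) and the software write pointer (tail) is large enough, OUT_RING writes dwords and wraps the tail with tail_mask, and ADVANCE_LP_RING publishes the new tail to the LP_RING registers. A minimal standalone sketch of that arithmetic (illustrative only, with the hardware register reads omitted; Size must be a power of two so that tail_mask = Size - 1 works):

struct ring_state {
	unsigned int head;	/* hardware read pointer, in bytes */
	unsigned int tail;	/* software write pointer, in bytes */
	unsigned int Size;	/* ring size in bytes, power of two */
};

static int ring_space(const struct ring_state *r)
{
	int space = r->head - (r->tail + 8);	/* keep a little slack */

	if (space < 0)
		space += r->Size;
	return space;
}

static unsigned int ring_advance(unsigned int tail, unsigned int tail_mask)
{
	return (tail + 4) & tail_mask;		/* one dword, with wrap */
}
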
diff --git a/linux-core/i830_drm.h b/linux-core/i830_drm.h
new file mode 100644
index 00000000..e4a2a257
--- /dev/null
+++ b/linux-core/i830_drm.h
@@ -0,0 +1,238 @@
+#ifndef _I830_DRM_H_
+#define _I830_DRM_H_
+
+/* WARNING: These defines must be the same as what the Xserver uses.
+ * If you change them, you must change the defines in the Xserver.
+ */
+
+#ifndef _I830_DEFINES_
+#define _I830_DEFINES_
+
+#define I830_DMA_BUF_ORDER 12
+#define I830_DMA_BUF_SZ (1<<I830_DMA_BUF_ORDER)
+#define I830_DMA_BUF_NR 256
+#define I830_NR_SAREA_CLIPRECTS 8
+
+/* Each region is a minimum of 64k, and there are at most 64 of them.
+ */
+#define I830_NR_TEX_REGIONS 64
+#define I830_LOG_MIN_TEX_REGION_SIZE 16
+
+/* if defining I830_ENABLE_4_TEXTURES, do it in i830_3d_reg.h, too */
+#if !defined(I830_ENABLE_4_TEXTURES)
+#define I830_TEXTURE_COUNT 2
+#define I830_TEXBLEND_COUNT 2 /* always same as TEXTURE_COUNT? */
+#else /* defined(I830_ENABLE_4_TEXTURES) */
+#define I830_TEXTURE_COUNT 4
+#define I830_TEXBLEND_COUNT 4 /* always same as TEXTURE_COUNT? */
+#endif /* I830_ENABLE_4_TEXTURES */
+
+#define I830_TEXBLEND_SIZE 12 /* (4 args + op) * 2 + COLOR_FACTOR */
+
+#define I830_UPLOAD_CTX 0x1
+#define I830_UPLOAD_BUFFERS 0x2
+#define I830_UPLOAD_CLIPRECTS 0x4
+#define I830_UPLOAD_TEX0_IMAGE 0x100 /* handled clientside */
+#define I830_UPLOAD_TEX0_CUBE 0x200 /* handled clientside */
+#define I830_UPLOAD_TEX1_IMAGE 0x400 /* handled clientside */
+#define I830_UPLOAD_TEX1_CUBE 0x800 /* handled clientside */
+#define I830_UPLOAD_TEX2_IMAGE 0x1000 /* handled clientside */
+#define I830_UPLOAD_TEX2_CUBE 0x2000 /* handled clientside */
+#define I830_UPLOAD_TEX3_IMAGE 0x4000 /* handled clientside */
+#define I830_UPLOAD_TEX3_CUBE 0x8000 /* handled clientside */
+#define I830_UPLOAD_TEX_N_IMAGE(n) (0x100 << (n * 2))
+#define I830_UPLOAD_TEX_N_CUBE(n) (0x200 << (n * 2))
+#define I830_UPLOAD_TEXIMAGE_MASK 0xff00
+#define I830_UPLOAD_TEX0 0x10000
+#define I830_UPLOAD_TEX1 0x20000
+#define I830_UPLOAD_TEX2 0x40000
+#define I830_UPLOAD_TEX3 0x80000
+#define I830_UPLOAD_TEX_N(n) (0x10000 << (n))
+#define I830_UPLOAD_TEX_MASK 0xf0000
+#define I830_UPLOAD_TEXBLEND0 0x100000
+#define I830_UPLOAD_TEXBLEND1 0x200000
+#define I830_UPLOAD_TEXBLEND2 0x400000
+#define I830_UPLOAD_TEXBLEND3 0x800000
+#define I830_UPLOAD_TEXBLEND_N(n) (0x100000 << (n))
+#define I830_UPLOAD_TEXBLEND_MASK 0xf00000
+#define I830_UPLOAD_TEX_PALETTE_N(n) (0x1000000 << (n))
+#define I830_UPLOAD_TEX_PALETTE_SHARED 0x4000000
+
+/* Indices into buf.Setup where various bits of state are mirrored per
+ * context and per buffer. These can be fired at the card as a unit,
+ * or in a piecewise fashion as required.
+ */
+
+/* Destbuffer state
+ * - backbuffer linear offset and pitch -- invariant in the current dri
+ * - zbuffer linear offset and pitch -- also invariant
+ * - drawing origin in back and depth buffers.
+ *
+ * Keep the depth/back buffer state here to accommodate private buffers
+ * in the future.
+ */
+
+#define I830_DESTREG_CBUFADDR 0
+/* Invariant */
+#define I830_DESTREG_DBUFADDR 1
+#define I830_DESTREG_DV0 2
+#define I830_DESTREG_DV1 3
+#define I830_DESTREG_SENABLE 4
+#define I830_DESTREG_SR0 5
+#define I830_DESTREG_SR1 6
+#define I830_DESTREG_SR2 7
+#define I830_DESTREG_DR0 8
+#define I830_DESTREG_DR1 9
+#define I830_DESTREG_DR2 10
+#define I830_DESTREG_DR3 11
+#define I830_DESTREG_DR4 12
+#define I830_DEST_SETUP_SIZE 13
+
+/* Context state
+ */
+#define I830_CTXREG_STATE1 0
+#define I830_CTXREG_STATE2 1
+#define I830_CTXREG_STATE3 2
+#define I830_CTXREG_STATE4 3
+#define I830_CTXREG_STATE5 4
+#define I830_CTXREG_IALPHAB 5
+#define I830_CTXREG_STENCILTST 6
+#define I830_CTXREG_ENABLES_1 7
+#define I830_CTXREG_ENABLES_2 8
+#define I830_CTXREG_AA 9
+#define I830_CTXREG_FOGCOLOR 10
+#define I830_CTXREG_BLENDCOLR0 11
+#define I830_CTXREG_BLENDCOLR 12 /* Dword 1 of 2 dword command */
+#define I830_CTXREG_VF 13
+#define I830_CTXREG_VF2 14
+#define I830_CTXREG_MCSB0 15
+#define I830_CTXREG_MCSB1 16
+#define I830_CTX_SETUP_SIZE 17
+
+/* Texture state (per tex unit)
+ */
+
+#define I830_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (6 dwords) */
+#define I830_TEXREG_MI1 1
+#define I830_TEXREG_MI2 2
+#define I830_TEXREG_MI3 3
+#define I830_TEXREG_MI4 4
+#define I830_TEXREG_MI5 5
+#define I830_TEXREG_MF 6 /* GFX_OP_MAP_FILTER */
+#define I830_TEXREG_MLC 7 /* GFX_OP_MAP_LOD_CTL */
+#define I830_TEXREG_MLL 8 /* GFX_OP_MAP_LOD_LIMITS */
+#define I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS */
+#define I830_TEX_SETUP_SIZE 10
+
+#define I830_FRONT 0x1
+#define I830_BACK 0x2
+#define I830_DEPTH 0x4
+
+#endif /* _I830_DEFINES_ */
+
+typedef struct _drm_i830_init {
+ enum {
+ I830_INIT_DMA = 0x01,
+ I830_CLEANUP_DMA = 0x02
+ } func;
+ unsigned int mmio_offset;
+ unsigned int buffers_offset;
+ int sarea_priv_offset;
+ unsigned int ring_start;
+ unsigned int ring_end;
+ unsigned int ring_size;
+ unsigned int front_offset;
+ unsigned int back_offset;
+ unsigned int depth_offset;
+ unsigned int w;
+ unsigned int h;
+ unsigned int pitch;
+ unsigned int pitch_bits;
+ unsigned int back_pitch;
+ unsigned int depth_pitch;
+ unsigned int cpp;
+} drm_i830_init_t;
+
+/* Warning: If you change the SAREA structure, you must change the Xserver
+ * structure as well */
+
+typedef struct _drm_i830_tex_region {
+ unsigned char next, prev; /* indices to form a circular LRU */
+ unsigned char in_use; /* owned by a client, or free? */
+ int age; /* tracked by clients to update local LRU's */
+} drm_i830_tex_region_t;
+
+typedef struct _drm_i830_sarea {
+ unsigned int ContextState[I830_CTX_SETUP_SIZE];
+ unsigned int BufferState[I830_DEST_SETUP_SIZE];
+ unsigned int TexState[I830_TEXTURE_COUNT][I830_TEX_SETUP_SIZE];
+ unsigned int TexBlendState[I830_TEXBLEND_COUNT][I830_TEXBLEND_SIZE];
+ unsigned int TexBlendStateWordsUsed[I830_TEXBLEND_COUNT];
+ unsigned int Palette[2][256];
+ unsigned int dirty;
+
+ unsigned int nbox;
+ drm_clip_rect_t boxes[I830_NR_SAREA_CLIPRECTS];
+
+ /* Maintain an LRU of contiguous regions of texture space. If
+ * you think you own a region of texture memory, and it has an
+ * age different to the one you set, then you are mistaken and
+ * it has been stolen by another client. If global texAge
+ * hasn't changed, there is no need to walk the list.
+ *
+ * These regions can be used as a proxy for the fine-grained
+ * texture information of other clients - by maintaining them
+ * in the same LRU which is used to age their own textures,
+ * clients have an approximate LRU for the whole of global
+ * texture space, and can make informed decisions as to which
+ * areas to kick out. There is no need to choose whether to
+ * kick out your own texture or someone else's - simply eject
+ * them all in LRU order.
+ */
+
+ drm_i830_tex_region_t texList[I830_NR_TEX_REGIONS+1];
+	/* Last elt is a sentinel */
+ int texAge; /* last time texture was uploaded */
+ int last_enqueue; /* last time a buffer was enqueued */
+ int last_dispatch; /* age of the most recently dispatched buffer */
+ int last_quiescent; /* */
+ int ctxOwner; /* last context to upload state */
+
+ int vertex_prim;
+} drm_i830_sarea_t;
+
+typedef struct _drm_i830_clear {
+ int clear_color;
+ int clear_depth;
+ int flags;
+ unsigned int clear_colormask;
+ unsigned int clear_depthmask;
+} drm_i830_clear_t;
+
+
+
+/* These may be placeholders if we have more cliprects than
+ * I830_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
+ * false, indicating that the buffer will be dispatched again with a
+ * new set of cliprects.
+ */
+typedef struct _drm_i830_vertex {
+ int idx; /* buffer index */
+ int used; /* nr bytes in use */
+ int discard; /* client is finished with the buffer? */
+} drm_i830_vertex_t;
+
+typedef struct _drm_i830_copy_t {
+ int idx; /* buffer index */
+ int used; /* nr bytes in use */
+ void *address; /* Address to copy from */
+} drm_i830_copy_t;
+
+typedef struct drm_i830_dma {
+ void *virtual;
+ int request_idx;
+ int request_size;
+ int granted;
+} drm_i830_dma_t;
+
+#endif /* _I830_DRM_H_ */
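
The texList/texAge fields implement the LRU protocol described in the SAREA comment above: a client stamps an age into each region it uploads to and later trusts the region only if that age is unchanged (and may skip the walk entirely when the global texAge has not moved). A hypothetical client-side helper, assuming drm.h and i830_drm.h are already included:

static int i830_region_still_mine(const drm_i830_sarea_t *sarea,
				  int region, int my_age)
{
	/* my_age is the value this client wrote when it last uploaded here */
	return sarea->texList[region].age == my_age;
}
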
diff --git a/linux-core/i830_drv.c b/linux-core/i830_drv.c
new file mode 100644
index 00000000..904f3660
--- /dev/null
+++ b/linux-core/i830_drv.c
@@ -0,0 +1,102 @@
+/* i830_drv.c -- I830 driver -*- linux-c -*-
+ * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Jeff Hartmann <jhartmann@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ * Abraham vd Merwe <abraham@2d3d.co.za>
+ */
+
+#include <linux/config.h>
+#include "i830.h"
+#include "drmP.h"
+#include "i830_drv.h"
+
+#define DRIVER_AUTHOR "VA Linux Systems Inc."
+
+#define DRIVER_NAME "i830"
+#define DRIVER_DESC "Intel 830M"
+#define DRIVER_DATE "20011004"
+
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 2
+#define DRIVER_PATCHLEVEL 0
+
+#define DRIVER_IOCTLS \
+ [DRM_IOCTL_NR(DRM_IOCTL_I830_INIT)] = { i830_dma_init, 1, 1 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I830_VERTEX)] = { i830_dma_vertex, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I830_CLEAR)] = { i830_clear_bufs, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I830_FLUSH)] = { i830_flush_ioctl, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I830_GETAGE)] = { i830_getage, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I830_GETBUF)] = { i830_getbuf, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I830_SWAP)] = { i830_swap_bufs, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I830_COPY)] = { i830_copybuf, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I830_DOCOPY)] = { i830_docopy, 1, 0 },
+
+#define __HAVE_COUNTERS 4
+#define __HAVE_COUNTER6 _DRM_STAT_IRQ
+#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY
+#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY
+#define __HAVE_COUNTER9 _DRM_STAT_DMA
+
+
+#include "drm_agpsupport.h"
+#include "drm_auth.h"
+#include "drm_bufs.h"
+#include "drm_context.h"
+#include "drm_dma.h"
+#include "drm_drawable.h"
+#include "drm_drv.h"
+
+#ifndef MODULE
+/* DRM(options) is called by the kernel to parse command-line options
+ * passed via the boot-loader (e.g., LILO). It calls the insmod option
+ * routine, drm_parse_drm.
+ */
+
+/* JH- We have to hand-expand the string ourselves because of the cpp.  If
+ * anyone can think of a way to fit this into the __setup macro without
+ * changing it, then please send the solution my way.
+ */
+static int __init i830_options( char *str )
+{
+ DRM(parse_options)( str );
+ return 1;
+}
+
+__setup( DRIVER_NAME "=", i830_options );
+#endif
+
+#include "drm_fops.h"
+#include "drm_init.h"
+#include "drm_ioctl.h"
+#include "drm_lock.h"
+#include "drm_lists.h"
+#include "drm_memory.h"
+#include "drm_proc.h"
+#include "drm_vm.h"
+#include "drm_stub.h"
diff --git a/linux-core/i830_drv.h b/linux-core/i830_drv.h
new file mode 100644
index 00000000..4e9d6c80
--- /dev/null
+++ b/linux-core/i830_drv.h
@@ -0,0 +1,213 @@
+/* i830_drv.h -- Private header for the I830 driver -*- linux-c -*-
+ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Jeff Hartmann <jhartmann@valinux.com>
+ *
+ */
+
+#ifndef _I830_DRV_H_
+#define _I830_DRV_H_
+
+typedef struct drm_i830_buf_priv {
+ u32 *in_use;
+ int my_use_idx;
+ int currently_mapped;
+ void *virtual;
+ void *kernel_virtual;
+ int map_count;
+ struct vm_area_struct *vma;
+} drm_i830_buf_priv_t;
+
+typedef struct _drm_i830_ring_buffer{
+ int tail_mask;
+ unsigned long Start;
+ unsigned long End;
+ unsigned long Size;
+ u8 *virtual_start;
+ int head;
+ int tail;
+ int space;
+} drm_i830_ring_buffer_t;
+
+typedef struct drm_i830_private {
+ drm_map_t *sarea_map;
+ drm_map_t *buffer_map;
+ drm_map_t *mmio_map;
+
+ drm_i830_sarea_t *sarea_priv;
+ drm_i830_ring_buffer_t ring;
+
+ unsigned long hw_status_page;
+ unsigned long counter;
+
+ atomic_t flush_done;
+ wait_queue_head_t flush_queue; /* Processes waiting until flush */
+ drm_buf_t *mmap_buffer;
+
+ u32 front_di1, back_di1, zi1;
+
+ int back_offset;
+ int depth_offset;
+ int w, h;
+ int pitch;
+ int back_pitch;
+ int depth_pitch;
+ unsigned int cpp;
+} drm_i830_private_t;
+
+ /* i830_dma.c */
+extern int i830_dma_schedule(drm_device_t *dev, int locked);
+extern int i830_getbuf(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int i830_dma_init(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int i830_flush_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern void i830_reclaim_buffers(drm_device_t *dev, pid_t pid);
+extern int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg);
+extern int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma);
+extern int i830_copybuf(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int i830_docopy(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+extern void i830_dma_quiescent(drm_device_t *dev);
+
+extern int i830_dma_vertex(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+extern int i830_swap_bufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+extern int i830_clear_bufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+#define I830_VERBOSE 0
+
+#define I830_BASE(reg) ((unsigned long) \
+ dev_priv->mmio_map->handle)
+#define I830_ADDR(reg) (I830_BASE(reg) + reg)
+#define I830_DEREF(reg) *(__volatile__ int *)I830_ADDR(reg)
+#define I830_READ(reg) I830_DEREF(reg)
+#define I830_WRITE(reg,val) do { I830_DEREF(reg) = val; } while (0)
+#define I830_DEREF16(reg) *(__volatile__ u16 *)I830_ADDR(reg)
+#define I830_READ16(reg) I830_DEREF16(reg)
+#define I830_WRITE16(reg,val) do { I830_DEREF16(reg) = val; } while (0)
+
+#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
+#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
+#define CMD_REPORT_HEAD (7<<23)
+#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
+#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
+
+#define INST_PARSER_CLIENT 0x00000000
+#define INST_OP_FLUSH 0x02000000
+#define INST_FLUSH_MAP_CACHE 0x00000001
+
+
+#define BB1_START_ADDR_MASK (~0x7)
+#define BB1_PROTECTED (1<<0)
+#define BB1_UNPROTECTED (0<<0)
+#define BB2_END_ADDR_MASK (~0x7)
+
+#define I830REG_HWSTAM 0x02098
+#define I830REG_INT_IDENTITY_R 0x020a4
+#define I830REG_INT_MASK_R 0x020a8
+#define I830REG_INT_ENABLE_R 0x020a0
+
+#define LP_RING 0x2030
+#define HP_RING 0x2040
+#define RING_TAIL 0x00
+#define TAIL_ADDR 0x000FFFF8
+#define RING_HEAD 0x04
+#define HEAD_WRAP_COUNT 0xFFE00000
+#define HEAD_WRAP_ONE 0x00200000
+#define HEAD_ADDR 0x001FFFFC
+#define RING_START 0x08
+#define START_ADDR 0x00FFFFF8
+#define RING_LEN 0x0C
+#define RING_NR_PAGES 0x000FF000
+#define RING_REPORT_MASK 0x00000006
+#define RING_REPORT_64K 0x00000002
+#define RING_REPORT_128K 0x00000004
+#define RING_NO_REPORT 0x00000000
+#define RING_VALID_MASK 0x00000001
+#define RING_VALID 0x00000001
+#define RING_INVALID 0x00000000
+
+#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define SC_UPDATE_SCISSOR (0x1<<1)
+#define SC_ENABLE_MASK (0x1<<0)
+#define SC_ENABLE (0x1<<0)
+
+#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
+#define SCI_YMIN_MASK (0xffff<<16)
+#define SCI_XMIN_MASK (0xffff<<0)
+#define SCI_YMAX_MASK (0xffff<<16)
+#define SCI_XMAX_MASK (0xffff<<0)
+
+#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
+#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
+#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
+#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+#define GFX_OP_PRIMITIVE ((0x3<<29)|(0x1f<<24))
+
+#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
+
+
+#define BR00_BITBLT_CLIENT 0x40000000
+#define BR00_OP_COLOR_BLT 0x10000000
+#define BR00_OP_SRC_COPY_BLT 0x10C00000
+#define BR13_SOLID_PATTERN 0x80000000
+
+#define BUF_3D_ID_COLOR_BACK (0x3<<24)
+#define BUF_3D_ID_DEPTH (0x7<<24)
+#define BUF_3D_USE_FENCE (1<<23)
+#define BUF_3D_PITCH(x) (((x)/4)<<2)
+
+#define CMD_OP_MAP_PALETTE_LOAD ((3<<29)|(0x1d<<24)|(0x82<<16)|255)
+#define MAP_PALETTE_NUM(x) ((x<<8) & (1<<8))
+#define MAP_PALETTE_BOTH (1<<11)
+
+#define XY_COLOR_BLT_CMD ((2<<29)|(0x50<<22)|0x4)
+#define XY_COLOR_BLT_WRITE_ALPHA (1<<21)
+#define XY_COLOR_BLT_WRITE_RGB (1<<20)
+
+#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
+#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
+#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
+
+#define MI_BATCH_BUFFER ((0x30<<23)|1)
+#define MI_BATCH_NON_SECURE (1)
+
+
+#endif
+
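The I830_READ()/I830_WRITE() accessors and the LP_RING register offsets defined above are what i830_dma.c combines to poll ring state. As a rough sketch of that usage (the helper name and the 8-byte guard band are illustrative, not taken from this header), assuming the usual dev_priv->ring bookkeeping:

static int i830_ring_space(drm_i830_private_t *dev_priv)
{
	/* I830_READ() expands against the local dev_priv->mmio_map handle */
	int head = (int)(I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR);
	int tail = (int)(I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR);
	int space = head - (tail + 8);		/* keep a small guard band */

	if (space < 0)
		space += dev_priv->ring.Size;	/* ring has wrapped */
	return space;
}
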
diff --git a/linux-core/r128_drv.c b/linux-core/r128_drv.c
index d4eda6c8..d8d7be4f 100644
--- a/linux-core/r128_drv.c
+++ b/linux-core/r128_drv.c
@@ -39,11 +39,11 @@
#define DRIVER_NAME "r128"
#define DRIVER_DESC "ATI Rage 128"
-#define DRIVER_DATE "20010405"
+#define DRIVER_DATE "20010917"
#define DRIVER_MAJOR 2
-#define DRIVER_MINOR 1
-#define DRIVER_PATCHLEVEL 6
+#define DRIVER_MINOR 2
+#define DRIVER_PATCHLEVEL 0
#define DRIVER_IOCTLS \
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { r128_cce_buffers, 1, 0 }, \
diff --git a/linux-core/sis_drv.c b/linux-core/sis_drv.c
index 3dd83fd7..2d9612f5 100644
--- a/linux-core/sis_drv.c
+++ b/linux-core/sis_drv.c
@@ -40,12 +40,12 @@
#define DRIVER_PATCHLEVEL 0
#define DRIVER_IOCTLS \
- [DRM_IOCTL_NR(SIS_IOCTL_FB_ALLOC)] = { sis_fb_alloc, 1, 1 }, \
- [DRM_IOCTL_NR(SIS_IOCTL_FB_FREE)] = { sis_fb_free, 1, 1 }, \
+ [DRM_IOCTL_NR(SIS_IOCTL_FB_ALLOC)] = { sis_fb_alloc, 1, 0 }, \
+ [DRM_IOCTL_NR(SIS_IOCTL_FB_FREE)] = { sis_fb_free, 1, 0 }, \
/* AGP Memory Management */ \
- [DRM_IOCTL_NR(SIS_IOCTL_AGP_INIT)] = { sisp_agp_init, 1, 1 }, \
- [DRM_IOCTL_NR(SIS_IOCTL_AGP_ALLOC)] = { sisp_agp_alloc, 1, 1 }, \
- [DRM_IOCTL_NR(SIS_IOCTL_AGP_FREE)] = { sisp_agp_free, 1, 1 }
+ [DRM_IOCTL_NR(SIS_IOCTL_AGP_INIT)] = { sisp_agp_init, 1, 0 }, \
+ [DRM_IOCTL_NR(SIS_IOCTL_AGP_ALLOC)] = { sisp_agp_alloc, 1, 0 }, \
+ [DRM_IOCTL_NR(SIS_IOCTL_AGP_FREE)] = { sisp_agp_free, 1, 0 }
#if 0 /* these don't appear to be defined */
/* SIS Stereo */
[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { sis_control, 1, 1 },
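For reference, each three-element initializer above fills a { handler, auth_needed, root_only } descriptor, so this hunk only drops the root requirement for the SiS memory ioctls; the descriptor is declared in drmP.h of this vintage roughly as follows (field comments added here):

typedef struct drm_ioctl_desc {
	drm_ioctl_t	*func;		/* ioctl handler */
	int		 auth_needed;	/* caller must be authenticated */
	int		 root_only;	/* caller must be root; now 0 above */
} drm_ioctl_desc_t;
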
diff --git a/linux/Makefile.kernel b/linux/Makefile.kernel
index e2630989..1060d4cb 100644
--- a/linux/Makefile.kernel
+++ b/linux/Makefile.kernel
@@ -3,13 +3,14 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
O_TARGET := drm.o
-list-multi := gamma.o tdfx.o r128.o mga.o i810.o ffb.o radeon.o
+list-multi := gamma.o tdfx.o r128.o mga.o i810.o i830.o ffb.o radeon.o
gamma-objs := gamma_drv.o gamma_dma.o
tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o
i810-objs := i810_drv.o i810_dma.o
+i830-objs := i830_drv.o i830_dma.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o
ffb-objs := ffb_drv.o ffb_context.o
@@ -19,6 +20,7 @@ obj-$(CONFIG_DRM_R128) += r128.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o
obj-$(CONFIG_DRM_MGA) += mga.o
obj-$(CONFIG_DRM_I810) += i810.o
+obj-$(CONFIG_DRM_I830) += i830.o
obj-$(CONFIG_DRM_FFB) += ffb.o
include $(TOPDIR)/Rules.make
@@ -35,6 +37,9 @@ mga.o: $(mga-objs) $(lib)
i810.o: $(i810-objs) $(lib)
$(LD) -r -o $@ $(i810-objs) $(lib)
+i830.o: $(i830-objs) $(lib)
+ $(LD) -r -o $@ $(i830-objs) $(lib)
+
r128.o: $(r128-objs) $(lib)
$(LD) -r -o $@ $(r128-objs) $(lib)
diff --git a/linux/Makefile.linux b/linux/Makefile.linux
index cb48028f..893d4985 100644
--- a/linux/Makefile.linux
+++ b/linux/Makefile.linux
@@ -31,15 +31,6 @@
# like this:
# make TREE=/usr/my-kernel-tree/include
#
-#
-# ***** NOTE NOTE NOTE NOTE NOTE *****
-# Because some distributions patch 2.2.x kernels to make kill_fasync have
-# three parameters, this script tries to determine, via the examination of
-# header files, if your kernel has been patched. If this detection is
-# incorrect, you can override the value on the command line, like this:
-# make PARAMS=2
-# or
-# make PARAMS=3
.SUFFIXES:
@@ -53,7 +44,7 @@ LIBS =
DRMTEMPLATES = drm_auth.h drm_bufs.h drm_context.h drm_dma.h drm_drawable.h \
drm_drv.h drm_fops.h drm_init.h drm_ioctl.h drm_lists.h \
drm_lock.h drm_memory.h drm_proc.h drm_stub.h drm_vm.h
-DRMHEADERS = drm.h drmP.h compat-pre24.h
+DRMHEADERS = drm.h drmP.h
GAMMAOBJS = gamma_drv.o gamma_dma.o
GAMMAHEADERS = gamma_drv.h $(DRMHEADERS) $(DRMTEMPLATES)
@@ -132,10 +123,6 @@ MODVERSIONS := $(shell gcc -E -nostdinc -I$(TREE) picker.c 2>/dev/null \
| grep -s 'MODVERSIONS = ' | cut -d' ' -f3)
AGP := $(shell gcc -E -nostdinc -I$(TREE) picker.c 2>/dev/null \
| grep -s 'AGP = ' | cut -d' ' -f3)
-SIS := $(shell gcc -E -nostdinc -I$(TREE) picker.c 2>/dev/null \
- | grep -s 'SIS = ' | cut -d' ' -f3)
-PARAMS := $(shell if fgrep kill_fasync $(TREE)/linux/fs.h 2>/dev/null \
- | egrep -q '(band|int, int)'; then echo 3; else echo 2; fi)
MACHINE := $(shell echo `uname -m`)
ifeq ($(AGP),0)
AGP := $(shell gcc -E -nostdinc -I$(TREE) picker.c 2>/dev/null \
@@ -145,13 +132,14 @@ endif
ifeq ($(AGP),1)
MODCFLAGS += -DCONFIG_AGP -DCONFIG_AGP_MODULE
DRMTEMPLATES += drm_agpsupport.h
-DRMHEADERS += agpsupport-pre24.h
MODS += mga.o
ifeq ($(MACHINE),i386)
MODS += i810.o
+MODS += i830.o
endif
ifeq ($(MACHINE),i686)
MODS += i810.o
+MODS += i830.o
endif
MGAOBJS = mga_drv.o mga_dma.o mga_state.o mga_warp.o
@@ -160,32 +148,26 @@ MGAHEADERS = mga.h mga_drv.h mga_drm.h $(DRMHEADERS) $(DRMTEMPLATES)
I810OBJS = i810_drv.o i810_dma.o
I810HEADERS = i810.h i810_drv.h i810_drm.h $(DRMHEADERS) $(DRMTEMPLATES)
+I830OBJS = i830_drv.o i830_dma.o
+I830HEADERS = i830.h i830_drv.h i830_drm.h $(DRMHEADERS) $(DRMTEMPLATES)
+
endif
ifeq ($(MACHINE),alpha)
MODCFLAGS+= -ffixed-8 -mno-fp-regs -mcpu=ev56 -Wa,-mev6
endif
-ifeq ($(SIS),1)
-# It appears that the SiS driver makes calls to sis_malloc and sis_free, and
-# that these calls are only defined if CONFIG_FB_SIS is selected. So, key
-# off that to determine if we should attempt to build the SiS driver.
-#
-# A better way would be to detect the appropriate definitions in the header
-# file to see if we can, at least, compile the driver.
MODS += sis.o
SISOBJS= sis_drv.o sis_mm.o sis_ds.o
SISHEADERS= sis_drv.h sis_drm.h $(DRMHEADERS)
MODCFLAGS += -DCONFIG_DRM_SIS
-endif
all::;@echo === KERNEL HEADERS IN $(TREE)
all::;@echo === SMP=${SMP} MODULES=${MODULES} MODVERSIONS=${MODVERSIONS} AGP=${AGP}
-all::;@echo === kill_fasync has $(PARAMS) parameters
all::;@echo === Compiling for machine $(MACHINE)
all::;@echo === WARNING
-all::;@echo === WARNING 2.4.0 kernels before 2.4.0-test11 DO NOT WORK
+all::;@echo === WARNING Use 2.4.x kernels ONLY!
all::;@echo === WARNING
ifeq ($(MODULES),0)
@@ -207,9 +189,6 @@ endif
ifeq ($(MODVERSIONS),1)
MODCFLAGS += -DMODVERSIONS -include $(TREE)/linux/modversions.h
endif
-ifeq ($(PARAMS),3)
-MODCFLAGS += -DKILLFASYNCHASTHREEPARAMETERS
-endif
# **** End of configuration
@@ -253,6 +232,11 @@ i810_drv.o: i810_drv.c
i810.o: $(I810OBJS) $(LIBS)
$(LD) -r $^ -o $@
+i830_drv.o: i830_drv.c
+ $(CC) $(MODCFLAGS) -DEXPORT_SYMTAB -I$(TREE) -c $< -o $@
+i830.o: $(I830OBJS) $(LIBS)
+ $(LD) -r $^ -o $@
+
endif
.PHONY: ChangeLog
@@ -273,6 +257,7 @@ $(TDFXOBJS): $(TDFXHEADERS)
ifeq ($(AGP),1)
$(MGAOBJS): $(MGAHEADERS)
$(I810OBJS): $(I810HEADERS)
+$(I830OBJS): $(I830HEADERS)
$(R128OBJS): $(R128HEADERS)
$(RADEONOBJS): $(RADEONHEADERS)
endif
diff --git a/linux/ati_pcigart.h b/linux/ati_pcigart.h
index a794b3e0..5851b72f 100644
--- a/linux/ati_pcigart.h
+++ b/linux/ati_pcigart.h
@@ -36,7 +36,7 @@
#elif PAGE_SIZE == 4096
# define ATI_PCIGART_TABLE_ORDER 3
# define ATI_PCIGART_TABLE_PAGES (1 << 3)
-#elif
+#else
# error - PAGE_SIZE not 8K or 4K
#endif
@@ -57,7 +57,7 @@ static unsigned long DRM(ati_alloc_pcigart_table)( void )
page = virt_to_page( address );
- for ( i = 0 ; i <= ATI_PCIGART_TABLE_PAGES ; i++, page++ ) {
+ for ( i = 0 ; i < ATI_PCIGART_TABLE_PAGES ; i++, page++ ) {
atomic_inc( &page->count );
SetPageReserved( page );
}
@@ -74,7 +74,7 @@ static void DRM(ati_free_pcigart_table)( unsigned long address )
page = virt_to_page( address );
- for ( i = 0 ; i <= ATI_PCIGART_TABLE_PAGES ; i++, page++ ) {
+ for ( i = 0 ; i < ATI_PCIGART_TABLE_PAGES ; i++, page++ ) {
atomic_dec( &page->count );
ClearPageReserved( page );
}
@@ -103,7 +103,6 @@ int DRM(ati_pcigart_init)( drm_device_t *dev,
goto done;
}
-#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400)
if ( !dev->pdev ) {
DRM_ERROR( "PCI device unknown!\n" );
goto done;
@@ -118,9 +117,6 @@ int DRM(ati_pcigart_init)( drm_device_t *dev,
address = 0;
goto done;
}
-#else
- bus_address = virt_to_bus( (void *)address );
-#endif
pci_gart = (u32 *)address;
@@ -130,7 +126,6 @@ int DRM(ati_pcigart_init)( drm_device_t *dev,
memset( pci_gart, 0, ATI_MAX_PCIGART_PAGES * sizeof(u32) );
for ( i = 0 ; i < pages ; i++ ) {
-#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400)
/* we need to support large memory configurations */
entry->busaddr[i] = pci_map_single(dev->pdev,
page_address( entry->pagelist[i] ),
@@ -144,9 +139,7 @@ int DRM(ati_pcigart_init)( drm_device_t *dev,
goto done;
}
page_base = (u32) entry->busaddr[i];
-#else
- page_base = page_to_bus( entry->pagelist[i] );
-#endif
+
for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
*pci_gart++ = cpu_to_le32( page_base );
page_base += ATI_PCIGART_PAGE_SIZE;
@@ -155,7 +148,7 @@ int DRM(ati_pcigart_init)( drm_device_t *dev,
ret = 1;
-#if __i386__
+#if defined(__i386__) || defined(__x86_64__)
asm volatile ( "wbinvd" ::: "memory" );
#else
mb();
@@ -171,7 +164,6 @@ int DRM(ati_pcigart_cleanup)( drm_device_t *dev,
unsigned long addr,
dma_addr_t bus_addr)
{
-#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400)
drm_sg_mem_t *entry = dev->sg;
unsigned long pages;
int i;
@@ -197,8 +189,6 @@ int DRM(ati_pcigart_cleanup)( drm_device_t *dev,
}
}
-#endif
-
if ( addr ) {
DRM(ati_free_pcigart_table)( addr );
}
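The hunk above removes the alpha-only conditionals, so every platform now maps GART pages through the PCI DMA API instead of virt_to_bus(). A minimal sketch of the per-page pattern it makes unconditional (the helper name is invented, and PCI_DMA_TODEVICE is assumed because the hunk truncates the call's trailing arguments):

/* assumes <linux/pci.h>; zero is treated as a mapping failure, as above */
static dma_addr_t ati_map_gart_page(struct pci_dev *pdev, struct page *page)
{
	dma_addr_t bus = pci_map_single(pdev, page_address(page),
					PAGE_SIZE, PCI_DMA_TODEVICE);
	if (bus == 0)
		DRM_ERROR("unable to map PCIGART page\n");
	return bus;
}
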
diff --git a/linux/drm.h b/linux/drm.h
index ac9f407a..0db586fc 100644
--- a/linux/drm.h
+++ b/linux/drm.h
@@ -104,9 +104,8 @@ typedef struct drm_tex_region {
#include "i810_drm.h"
#include "r128_drm.h"
#include "radeon_drm.h"
-#ifdef CONFIG_DRM_SIS
#include "sis_drm.h"
-#endif
+#include "i830_drm.h"
typedef struct drm_version {
int version_major; /* Major version */
@@ -449,6 +448,12 @@ typedef struct drm_scatter_gather {
#define DRM_IOCTL_I810_SWAP DRM_IO( 0x46)
#define DRM_IOCTL_I810_COPY DRM_IOW( 0x47, drm_i810_copy_t)
#define DRM_IOCTL_I810_DOCOPY DRM_IO( 0x48)
+#define DRM_IOCTL_I810_OV0INFO DRM_IOR( 0x49, drm_i810_overlay_t)
+#define DRM_IOCTL_I810_FSTATUS DRM_IO ( 0x4a)
+#define DRM_IOCTL_I810_OV0FLIP DRM_IO ( 0x4b)
+#define DRM_IOCTL_I810_MC DRM_IOW( 0x4c, drm_i810_mc_t)
+#define DRM_IOCTL_I810_RSTATUS DRM_IO ( 0x4d )
+
/* Rage 128 specific ioctls */
#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
@@ -483,7 +488,6 @@ typedef struct drm_scatter_gather {
#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t)
#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t)
-#ifdef CONFIG_DRM_SIS
/* SiS specific ioctls */
#define SIS_IOCTL_FB_ALLOC DRM_IOWR(0x44, drm_sis_mem_t)
#define SIS_IOCTL_FB_FREE DRM_IOW( 0x45, drm_sis_mem_t)
@@ -493,6 +497,16 @@ typedef struct drm_scatter_gather {
#define SIS_IOCTL_FLIP DRM_IOW( 0x48, drm_sis_flip_t)
#define SIS_IOCTL_FLIP_INIT DRM_IO( 0x49)
#define SIS_IOCTL_FLIP_FINAL DRM_IO( 0x50)
-#endif
+
+/* I830 specific ioctls */
+#define DRM_IOCTL_I830_INIT DRM_IOW( 0x40, drm_i830_init_t)
+#define DRM_IOCTL_I830_VERTEX DRM_IOW( 0x41, drm_i830_vertex_t)
+#define DRM_IOCTL_I830_CLEAR DRM_IOW( 0x42, drm_i830_clear_t)
+#define DRM_IOCTL_I830_FLUSH DRM_IO ( 0x43)
+#define DRM_IOCTL_I830_GETAGE DRM_IO ( 0x44)
+#define DRM_IOCTL_I830_GETBUF DRM_IOWR(0x45, drm_i830_dma_t)
+#define DRM_IOCTL_I830_SWAP DRM_IO ( 0x46)
+#define DRM_IOCTL_I830_COPY DRM_IOW( 0x47, drm_i830_copy_t)
+#define DRM_IOCTL_I830_DOCOPY DRM_IO ( 0x48)
#endif
diff --git a/linux/drmP.h b/linux/drmP.h
index aac5c58a..4786ba9e 100644
--- a/linux/drmP.h
+++ b/linux/drmP.h
@@ -66,13 +66,8 @@
#include <linux/types.h>
#include <linux/agp_backend.h>
#endif
-#if LINUX_VERSION_CODE >= 0x020100 /* KERNEL_VERSION(2,1,0) */
#include <linux/tqueue.h>
#include <linux/poll.h>
-#endif
-#if LINUX_VERSION_CODE < 0x020400
-#include "compat-pre24.h"
-#endif
#include <asm/pgalloc.h>
#include "drm.h"
@@ -154,180 +149,7 @@
#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
- /* Backward compatibility section */
- /* _PAGE_WT changed to _PAGE_PWT in 2.2.6 */
-#ifndef _PAGE_PWT
-#define _PAGE_PWT _PAGE_WT
-#endif
- /* Wait queue declarations changed in 2.3.1 */
-#ifndef DECLARE_WAITQUEUE
-#define DECLARE_WAITQUEUE(w,c) struct wait_queue w = { c, NULL }
-typedef struct wait_queue *wait_queue_head_t;
-#define init_waitqueue_head(q) *q = NULL;
-#endif
-
- /* _PAGE_4M changed to _PAGE_PSE in 2.3.23 */
-#ifndef _PAGE_PSE
-#define _PAGE_PSE _PAGE_4M
-#endif
-
- /* vm_offset changed to vm_pgoff in 2.3.25 */
-#if LINUX_VERSION_CODE < 0x020319
-#define VM_OFFSET(vma) ((vma)->vm_offset)
-#else
#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT)
-#endif
-
- /* *_nopage return values defined in 2.3.26 */
-#ifndef NOPAGE_SIGBUS
-#define NOPAGE_SIGBUS 0
-#endif
-#ifndef NOPAGE_OOM
-#define NOPAGE_OOM 0
-#endif
-
- /* module_init/module_exit added in 2.3.13 */
-#ifndef module_init
-#define module_init(x) int init_module(void) { return x(); }
-#endif
-#ifndef module_exit
-#define module_exit(x) void cleanup_module(void) { x(); }
-#endif
-
- /* Generic cmpxchg added in 2.3.x */
-#ifndef __HAVE_ARCH_CMPXCHG
- /* Include this here so that driver can be
- used with older kernels. */
-#if defined(__alpha__)
-static __inline__ unsigned long
-__cmpxchg_u32(volatile int *m, int old, int new)
-{
- unsigned long prev, cmp;
-
- __asm__ __volatile__(
- "1: ldl_l %0,%5\n"
- " cmpeq %0,%3,%1\n"
- " beq %1,2f\n"
- " mov %4,%1\n"
- " stl_c %1,%2\n"
- " beq %1,3f\n"
- "2: mb\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- : "=&r"(prev), "=&r"(cmp), "=m"(*m)
- : "r"((long) old), "r"(new), "m"(*m)
- : "memory" );
-
- return prev;
-}
-
-static __inline__ unsigned long
-__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
-{
- unsigned long prev, cmp;
-
- __asm__ __volatile__(
- "1: ldq_l %0,%5\n"
- " cmpeq %0,%3,%1\n"
- " beq %1,2f\n"
- " mov %4,%1\n"
- " stq_c %1,%2\n"
- " beq %1,3f\n"
- "2: mb\n"
- ".subsection 2\n"
- "3: br 1b\n"
- ".previous"
- : "=&r"(prev), "=&r"(cmp), "=m"(*m)
- : "r"((long) old), "r"(new), "m"(*m)
- : "memory" );
-
- return prev;
-}
-
-static __inline__ unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
-{
- switch (size) {
- case 4:
- return __cmpxchg_u32(ptr, old, new);
- case 8:
- return __cmpxchg_u64(ptr, old, new);
- }
- return old;
-}
-#define cmpxchg(ptr,o,n) \
- ({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
- (unsigned long)_n_, sizeof(*(ptr))); \
- })
-
-#elif __i386__
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- unsigned long prev;
- switch (size) {
- case 1:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 2:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 4:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- }
- return old;
-}
-
-#elif defined(__powerpc__)
-extern void __cmpxchg_called_with_bad_pointer(void);
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- unsigned long prev;
-
- switch (size) {
- case 4:
- __asm__ __volatile__(
- "sync;"
- "0: lwarx %0,0,%1 ;"
- " cmpl 0,%0,%3;"
- " bne 1f;"
- " stwcx. %2,0,%1;"
- " bne- 0b;"
- "1: "
- "sync;"
- : "=&r"(prev)
- : "r"(ptr), "r"(new), "r"(old)
- : "cr0", "memory");
- return prev;
- }
- __cmpxchg_called_with_bad_pointer();
- return old;
-}
-
-#endif /* i386, powerpc & alpha */
-
-#ifndef __alpha__
-#define cmpxchg(ptr,o,n) \
- ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o), \
- (unsigned long)(n),sizeof(*(ptr))))
-#endif
-
-#endif /* !__HAVE_ARCH_CMPXCHG */
/* Macros to make printk easier */
#define DRM_ERROR(fmt, arg...) \
@@ -621,6 +443,8 @@ typedef struct drm_agp_head {
int acquired;
unsigned long base;
int agp_mtrr;
+ int cant_use_aperture;
+ unsigned long page_mask;
} drm_agp_head_t;
#endif
@@ -629,9 +453,7 @@ typedef struct drm_sg_mem {
void *virtual;
int pages;
struct page **pagelist;
-#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400)
dma_addr_t *busaddr;
-#endif
} drm_sg_mem_t;
typedef struct drm_sigdata {
@@ -722,8 +544,8 @@ typedef struct drm_device {
#if __REALLY_HAVE_AGP
drm_agp_head_t *agp;
#endif
-#ifdef __alpha__
struct pci_dev *pdev;
+#ifdef __alpha__
#if LINUX_VERSION_CODE < 0x020403
struct pci_controler *hose;
#else
@@ -772,21 +594,6 @@ extern unsigned int DRM(poll)(struct file *filp,
struct poll_table_struct *wait);
/* Mapping support (drm_vm.h) */
-#if LINUX_VERSION_CODE < 0x020317
-extern unsigned long DRM(vm_nopage)(struct vm_area_struct *vma,
- unsigned long address,
- int write_access);
-extern unsigned long DRM(vm_shm_nopage)(struct vm_area_struct *vma,
- unsigned long address,
- int write_access);
-extern unsigned long DRM(vm_dma_nopage)(struct vm_area_struct *vma,
- unsigned long address,
- int write_access);
-extern unsigned long DRM(vm_sg_nopage)(struct vm_area_struct *vma,
- unsigned long address,
- int write_access);
-#else
- /* Return type changed in 2.3.23 */
extern struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
@@ -799,7 +606,6 @@ extern struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
extern struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access);
-#endif
extern void DRM(vm_open)(struct vm_area_struct *vma);
extern void DRM(vm_close)(struct vm_area_struct *vma);
extern void DRM(vm_shm_close)(struct vm_area_struct *vma);
diff --git a/linux/drm_agpsupport.h b/linux/drm_agpsupport.h
index 66d1defc..fc0b29aa 100644
--- a/linux/drm_agpsupport.h
+++ b/linux/drm_agpsupport.h
@@ -35,12 +35,8 @@
#if __REALLY_HAVE_AGP
-#if LINUX_VERSION_CODE < 0x020400
-#include "agpsupport-pre24.h"
-#else
#define DRM_AGP_GET (drm_agp_t *)inter_module_get("drm_agp")
#define DRM_AGP_PUT inter_module_put("drm_agp")
-#endif
static const drm_agp_t *drm_agp = NULL;
@@ -271,22 +267,24 @@ drm_agp_head_t *DRM(agp_init)(void)
case INTEL_GX: head->chipset = "Intel 440GX"; break;
case INTEL_I810: head->chipset = "Intel i810"; break;
-#if LINUX_VERSION_CODE >= 0x020400
case INTEL_I815: head->chipset = "Intel i815"; break;
+#if LINUX_VERSION_CODE >= 0x020415
+ case INTEL_I820: head->chipset = "Intel i820"; break;
+#endif
case INTEL_I840: head->chipset = "Intel i840"; break;
- case INTEL_I850: head->chipset = "Intel i850"; break;
+#if LINUX_VERSION_CODE >= 0x020415
+ case INTEL_I845: head->chipset = "Intel i845"; break;
#endif
+ case INTEL_I850: head->chipset = "Intel i850"; break;
case VIA_GENERIC: head->chipset = "VIA"; break;
case VIA_VP3: head->chipset = "VIA VP3"; break;
case VIA_MVP3: head->chipset = "VIA MVP3"; break;
-#if LINUX_VERSION_CODE >= 0x020400
case VIA_MVP4: head->chipset = "VIA MVP4"; break;
case VIA_APOLLO_KX133: head->chipset = "VIA Apollo KX133";
break;
case VIA_APOLLO_KT133: head->chipset = "VIA Apollo KT133";
break;
-#endif
case VIA_APOLLO_PRO: head->chipset = "VIA Apollo Pro";
break;
@@ -316,6 +314,14 @@ drm_agp_head_t *DRM(agp_init)(void)
default: head->chipset = "Unknown"; break;
}
+#if LINUX_VERSION_CODE <= 0x020408
+ head->cant_use_aperture = 0;
+ head->page_mask = ~(0xfff);
+#else
+ head->cant_use_aperture = head->agp_info.cant_use_aperture;
+ head->page_mask = head->agp_info.page_mask;
+#endif
+
DRM_INFO("AGP %d.%d on %s @ 0x%08lx %ZuMB\n",
head->agp_info.version.major,
head->agp_info.version.minor,
diff --git a/linux/drm_bufs.h b/linux/drm_bufs.h
index ec5dd543..3ff3527f 100644
--- a/linux/drm_bufs.h
+++ b/linux/drm_bufs.h
@@ -229,11 +229,7 @@ int DRM(rmmap)(struct inode *inode, struct file *filp,
DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);
for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
-#if LINUX_VERSION_CODE >= 0x020300
if (pt->vma->vm_private_data == map) found_maps++;
-#else
- if (pt->vma->vm_pte == map) found_maps++;
-#endif
}
if(!found_maps) {
diff --git a/linux/drm_context.h b/linux/drm_context.h
index eb4d61c8..e155946d 100644
--- a/linux/drm_context.h
+++ b/linux/drm_context.h
@@ -27,6 +27,10 @@
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
+ * ChangeLog:
+ * 2001-11-16 Torsten Duwe <duwe@caldera.de>
+ * added context constructor/destructor hooks,
+ * needed by SiS driver's memory management.
*/
#define __NO_VERSION__
@@ -181,7 +185,7 @@ int DRM(setsareactx)(struct inode *inode, struct file *filp,
drm_device_t *dev = priv->dev;
drm_ctx_priv_map_t request;
drm_map_t *map = NULL;
- drm_map_list_t *r_list;
+ drm_map_list_t *r_list = NULL;
struct list_head *list;
if (copy_from_user(&request,
@@ -316,6 +320,10 @@ int DRM(addctx)( struct inode *inode, struct file *filp,
/* Should this return -EBUSY instead? */
return -ENOMEM;
}
+#ifdef DRIVER_CTX_CTOR
+ if ( ctx.handle != DRM_KERNEL_CONTEXT )
+ DRIVER_CTX_CTOR(ctx.handle); /* XXX: also pass dev ? */
+#endif
if ( copy_to_user( (drm_ctx_t *)arg, &ctx, sizeof(ctx) ) )
return -EFAULT;
@@ -390,6 +398,9 @@ int DRM(rmctx)( struct inode *inode, struct file *filp,
priv->remove_auth_on_close = 1;
}
if ( ctx.handle != DRM_KERNEL_CONTEXT ) {
+#ifdef DRIVER_CTX_DTOR
+ DRIVER_CTX_DTOR(ctx.handle); /* XXX: also pass dev ? */
+#endif
DRM(ctxbitmap_free)( dev, ctx.handle );
}
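The new DRIVER_CTX_CTOR/DRIVER_CTX_DTOR hooks are optional: when a driver leaves them undefined, the #ifdef blocks above compile away and behaviour is unchanged. A driver that keeps per-context state (such as the SiS memory manager mentioned in the ChangeLog) would define them in its template header before drm_context.h is included; a hypothetical example, with invented function names:

#define DRIVER_CTX_CTOR( handle )	my_drv_init_context( handle )
#define DRIVER_CTX_DTOR( handle )	my_drv_final_context( handle )
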
diff --git a/linux/drm_drv.h b/linux/drm_drv.h
index e1aff06d..23568288 100644
--- a/linux/drm_drv.h
+++ b/linux/drm_drv.h
@@ -113,7 +113,6 @@
#define DRIVER_IOCTLS
#endif
#ifndef DRIVER_FOPS
-#if LINUX_VERSION_CODE >= 0x020400
#define DRIVER_FOPS \
static struct file_operations DRM(fops) = { \
owner: THIS_MODULE, \
@@ -126,19 +125,6 @@ static struct file_operations DRM(fops) = { \
fasync: DRM(fasync), \
poll: DRM(poll), \
}
-#else
-#define DRIVER_FOPS \
-static struct file_operations DRM(fops) = { \
- open: DRM(open), \
- flush: DRM(flush), \
- release: DRM(release), \
- ioctl: DRM(ioctl), \
- mmap: DRM(mmap), \
- read: DRM(read), \
- fasync: DRM(fasync), \
- poll: DRM(poll), \
-}
-#endif
#endif
@@ -234,6 +220,9 @@ static char *drm_opts = NULL;
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_PARM( drm_opts, "s" );
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("GPL and additional rights");
+#endif
static int DRM(setup)( drm_device_t *dev )
{
@@ -731,9 +720,6 @@ int DRM(open)( struct inode *inode, struct file *filp )
retcode = DRM(open_helper)( inode, filp, dev );
if ( !retcode ) {
-#if LINUX_VERSION_CODE < 0x020333
- MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
-#endif
atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
spin_lock( &dev->count_lock );
if ( !dev->open_count++ ) {
@@ -852,9 +838,6 @@ int DRM(release)( struct inode *inode, struct file *filp )
* End inline drm_release
*/
-#if LINUX_VERSION_CODE < 0x020333
- MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
-#endif
atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
spin_lock( &dev->count_lock );
if ( !--dev->open_count ) {
diff --git a/linux/drm_fops.h b/linux/drm_fops.h
index 3656c5e9..9c2135fe 100644
--- a/linux/drm_fops.h
+++ b/linux/drm_fops.h
@@ -125,21 +125,31 @@ ssize_t DRM(read)(struct file *filp, char *buf, size_t count, loff_t *off)
int avail;
int send;
int cur;
+ DECLARE_WAITQUEUE(wait, current);
DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp);
+ add_wait_queue(&dev->buf_readers, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
while (dev->buf_rp == dev->buf_wp) {
DRM_DEBUG(" sleeping\n");
if (filp->f_flags & O_NONBLOCK) {
+ remove_wait_queue(&dev->buf_readers, &wait);
+ set_current_state(TASK_RUNNING);
return -EAGAIN;
}
- interruptible_sleep_on(&dev->buf_readers);
+ schedule(); /* wait for dev->buf_readers */
if (signal_pending(current)) {
DRM_DEBUG(" interrupted\n");
+ remove_wait_queue(&dev->buf_readers, &wait);
+ set_current_state(TASK_RUNNING);
return -ERESTARTSYS;
}
DRM_DEBUG(" awake\n");
+ set_current_state(TASK_INTERRUPTIBLE);
}
+ remove_wait_queue(&dev->buf_readers, &wait);
+ set_current_state(TASK_RUNNING);
left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
avail = DRM_BSZ - left;
@@ -191,24 +201,8 @@ int DRM(write_string)(drm_device_t *dev, const char *s)
send -= count;
}
-#if LINUX_VERSION_CODE < 0x020315 && !defined(KILLFASYNCHASTHREEPARAMETERS)
- /* The extra parameter to kill_fasync was added in 2.3.21, and is
- _not_ present in _stock_ 2.2.14 and 2.2.15. However, some
- distributions patch 2.2.x kernels to add this parameter. The
- Makefile.linux attempts to detect this addition and defines
- KILLFASYNCHASTHREEPARAMETERS if three parameters are found. */
- if (dev->buf_async) kill_fasync(dev->buf_async, SIGIO);
-#else
-
- /* Parameter added in 2.3.21. */
-#if LINUX_VERSION_CODE < 0x020400
- if (dev->buf_async) kill_fasync(dev->buf_async, SIGIO, POLL_IN);
-#else
- /* Type of first parameter changed in
- Linux 2.4.0-test2... */
if (dev->buf_async) kill_fasync(&dev->buf_async, SIGIO, POLL_IN);
-#endif
-#endif
+
DRM_DEBUG("waking\n");
wake_up_interruptible(&dev->buf_readers);
return 0;
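DRM(read) now open-codes its sleep instead of calling interruptible_sleep_on(): the task is queued on buf_readers and marked TASK_INTERRUPTIBLE before the buffer pointers are re-tested, so a writer's wake-up can no longer be lost between the test and the sleep. The generic shape of that pattern, with queue and condition as placeholders, is:

	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&queue, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (condition)			/* e.g. buf_rp != buf_wp */
			break;
		if (signal_pending(current))
			break;			/* caller returns -ERESTARTSYS */
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&queue, &wait);
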
diff --git a/linux/drm_ioctl.h b/linux/drm_ioctl.h
index adcb22c8..0d8a1259 100644
--- a/linux/drm_ioctl.h
+++ b/linux/drm_ioctl.h
@@ -98,7 +98,6 @@ int DRM(setunique)(struct inode *inode, struct file *filp,
}
sprintf(dev->devname, "%s@%s", dev->name, dev->unique);
-#ifdef __alpha__
do {
struct pci_dev *pci_dev;
int b, d, f;
@@ -116,10 +115,11 @@ int DRM(setunique)(struct inode *inode, struct file *filp,
pci_dev = pci_find_slot(b, PCI_DEVFN(d,f));
if (pci_dev) {
dev->pdev = pci_dev;
+#ifdef __alpha__
dev->hose = pci_dev->sysdata;
+#endif
}
} while(0);
-#endif
return 0;
}
diff --git a/linux/drm_memory.h b/linux/drm_memory.h
index 498937d4..f11b80c3 100644
--- a/linux/drm_memory.h
+++ b/linux/drm_memory.h
@@ -85,12 +85,7 @@ void DRM(mem_init)(void)
}
si_meminfo(&si);
-#if LINUX_VERSION_CODE < 0x020317
- /* Changed to page count in 2.3.23 */
- DRM(ram_available) = si.totalram >> PAGE_SHIFT;
-#else
DRM(ram_available) = si.totalram;
-#endif
DRM(ram_used) = 0;
}
@@ -257,12 +252,7 @@ unsigned long DRM(alloc_pages)(int order, int area)
for (addr = address, sz = bytes;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
-#if LINUX_VERSION_CODE >= 0x020400
- /* Argument type changed in 2.4.0-test6/pre8 */
mem_map_reserve(virt_to_page(addr));
-#else
- mem_map_reserve(MAP_NR(addr));
-#endif
}
return address;
@@ -283,12 +273,7 @@ void DRM(free_pages)(unsigned long address, int order, int area)
for (addr = address, sz = bytes;
sz > 0;
addr += PAGE_SIZE, sz -= PAGE_SIZE) {
-#if LINUX_VERSION_CODE >= 0x020400
- /* Argument type changed in 2.4.0-test6/pre8 */
mem_map_unreserve(virt_to_page(addr));
-#else
- mem_map_unreserve(MAP_NR(addr));
-#endif
}
free_pages(address, order);
}
diff --git a/linux/drm_proc.h b/linux/drm_proc.h
index f65f42b7..24e8556f 100644
--- a/linux/drm_proc.h
+++ b/linux/drm_proc.h
@@ -186,7 +186,7 @@ static int DRM(_vm_info)(char *buf, char **start, off_t offset, int request,
DRM_PROC_PRINT("slot offset size type flags "
"address mtrr\n\n");
i = 0;
- list_for_each(list, &dev->maplist->head) {
+ if (dev->maplist != NULL) list_for_each(list, &dev->maplist->head) {
r_list = (drm_map_list_t *)list;
map = r_list->map;
if(!map) continue;
diff --git a/linux/drm_scatter.h b/linux/drm_scatter.h
index 66d34968..a6b8275f 100644
--- a/linux/drm_scatter.h
+++ b/linux/drm_scatter.h
@@ -47,11 +47,9 @@ void DRM(sg_cleanup)( drm_sg_mem_t *entry )
vfree( entry->virtual );
-#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400)
DRM(free)( entry->busaddr,
entry->pages * sizeof(*entry->busaddr),
DRM_MEM_PAGES );
-#endif
DRM(free)( entry->pagelist,
entry->pages * sizeof(*entry->pagelist),
DRM_MEM_PAGES );
@@ -99,7 +97,6 @@ int DRM(sg_alloc)( struct inode *inode, struct file *filp,
return -ENOMEM;
}
-#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400)
entry->busaddr = DRM(alloc)( pages * sizeof(*entry->busaddr),
DRM_MEM_PAGES );
if ( !entry->busaddr ) {
@@ -112,15 +109,12 @@ int DRM(sg_alloc)( struct inode *inode, struct file *filp,
return -ENOMEM;
}
memset( (void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr) );
-#endif
entry->virtual = vmalloc_32( pages << PAGE_SHIFT );
if ( !entry->virtual ) {
-#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400)
DRM(free)( entry->busaddr,
entry->pages * sizeof(*entry->busaddr),
DRM_MEM_PAGES );
-#endif
DRM(free)( entry->pagelist,
entry->pages * sizeof(*entry->pagelist),
DRM_MEM_PAGES );
diff --git a/linux/drm_stub.h b/linux/drm_stub.h
index fe8a1bc9..ead5b959 100644
--- a/linux/drm_stub.h
+++ b/linux/drm_stub.h
@@ -31,10 +31,6 @@
#define __NO_VERSION__
#include "drmP.h"
-#if LINUX_VERSION_CODE < 0x020400
-#include "stubsupport-pre24.h"
-#endif
-
#define DRM_STUB_MAXCARDS 16 /* Enough for one machine */
static struct drm_stub_list {
@@ -70,9 +66,7 @@ static int DRM(stub_open)(struct inode *inode, struct file *filp)
}
static struct file_operations DRM(stub_fops) = {
-#if LINUX_VERSION_CODE >= 0x020400
owner: THIS_MODULE,
-#endif
open: DRM(stub_open)
};
diff --git a/linux/drm_vm.h b/linux/drm_vm.h
index d9ee0ff4..5fd3571a 100644
--- a/linux/drm_vm.h
+++ b/linux/drm_vm.h
@@ -56,18 +56,11 @@ struct vm_operations_struct DRM(vm_sg_ops) = {
close: DRM(vm_close),
};
-#if LINUX_VERSION_CODE < 0x020317
-unsigned long DRM(vm_nopage)(struct vm_area_struct *vma,
- unsigned long address,
- int write_access)
-#else
- /* Return type changed in 2.3.23 */
struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access)
-#endif
{
-#if defined(__alpha__) && __REALLY_HAVE_AGP
+#if __REALLY_HAVE_AGP
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
drm_map_t *map = NULL;
@@ -77,6 +70,9 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
/*
* Find the right map
*/
+
+ if(!dev->agp->cant_use_aperture) goto vm_nopage_error;
+
list_for_each(list, &dev->maplist->head) {
r_list = (drm_map_list_t *)list;
map = r_list->map;
@@ -90,10 +86,12 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
struct drm_agp_mem *agpmem;
struct page *page;
+#if __alpha__
/*
- * Make it a bus-relative address
+ * Adjust to a bus-relative address
*/
baddr -= dev->hose->mem_space->start;
+#endif
/*
* It's AGP memory - find the real physical page to map
@@ -104,50 +102,32 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
break;
}
- if (!agpmem) {
- /*
- * Oops - no memory found
- */
- return NOPAGE_SIGBUS; /* couldn't find it */
- }
+ if (!agpmem) goto vm_nopage_error;
/*
* Get the page, inc the use count, and return it
*/
offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
- agpmem->memory->memory[offset] &= ~1UL; /* HACK */
+ agpmem->memory->memory[offset] &= dev->agp->page_mask;
page = virt_to_page(__va(agpmem->memory->memory[offset]));
-#if 0
- DRM_ERROR("baddr = 0x%lx page = 0x%lx, offset = 0x%lx\n",
- baddr, __va(agpmem->memory->memory[offset]), offset);
-#endif
get_page(page);
-#if LINUX_VERSION_CODE < 0x020317
- return page_address(page);
-#else
+
+ DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx\n",
+ baddr, __va(agpmem->memory->memory[offset]), offset);
+
return page;
-#endif
}
-#endif
+vm_nopage_error:
+#endif /* __REALLY_HAVE_AGP */
+
return NOPAGE_SIGBUS; /* Disallow mremap */
}
-#if LINUX_VERSION_CODE < 0x020317
-unsigned long DRM(vm_shm_nopage)(struct vm_area_struct *vma,
- unsigned long address,
- int write_access)
-#else
- /* Return type changed in 2.3.23 */
struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access)
-#endif
{
-#if LINUX_VERSION_CODE >= 0x020300
drm_map_t *map = (drm_map_t *)vma->vm_private_data;
-#else
- drm_map_t *map = (drm_map_t *)vma->vm_pte;
-#endif
unsigned long offset;
unsigned long i;
pgd_t *pgd;
@@ -174,11 +154,7 @@ struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
get_page(page);
DRM_DEBUG("0x%08lx => 0x%08x\n", address, page_to_bus(page));
-#if LINUX_VERSION_CODE < 0x020317
- return page_address(page);
-#else
return page;
-#endif
}
/* Special close routine which deletes map information if we are the last
@@ -197,25 +173,14 @@ void DRM(vm_shm_close)(struct vm_area_struct *vma)
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
-#if LINUX_VERSION_CODE < 0x020333
- MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
-#endif
atomic_dec(&dev->vma_count);
-#if LINUX_VERSION_CODE >= 0x020300
map = vma->vm_private_data;
-#else
- map = vma->vm_pte;
-#endif
down(&dev->struct_sem);
for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
next = pt->next;
-#if LINUX_VERSION_CODE >= 0x020300
if (pt->vma->vm_private_data == map) found_maps++;
-#else
- if (pt->vma->vm_pte == map) found_maps++;
-#endif
if (pt->vma == vma) {
if (prev) {
prev->next = pt->next;
@@ -268,16 +233,9 @@ void DRM(vm_shm_close)(struct vm_area_struct *vma)
up(&dev->struct_sem);
}
-#if LINUX_VERSION_CODE < 0x020317
-unsigned long DRM(vm_dma_nopage)(struct vm_area_struct *vma,
- unsigned long address,
- int write_access)
-#else
- /* Return type changed in 2.3.23 */
struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access)
-#endif
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
@@ -299,29 +257,14 @@ struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
DRM_DEBUG("0x%08lx (page %lu) => 0x%08x\n", address, page_nr,
page_to_bus(page));
-#if LINUX_VERSION_CODE < 0x020317
- return page_address(page);
-#else
return page;
-#endif
}
-#if LINUX_VERSION_CODE < 0x020317
-unsigned long DRM(vm_sg_nopage)(struct vm_area_struct *vma,
- unsigned long address,
- int write_access)
-#else
- /* Return type changed in 2.3.23 */
struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
unsigned long address,
int write_access)
-#endif
{
-#if LINUX_VERSION_CODE >= 0x020300
drm_map_t *map = (drm_map_t *)vma->vm_private_data;
-#else
- drm_map_t *map = (drm_map_t *)vma->vm_pte;
-#endif
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->dev;
drm_sg_mem_t *entry = dev->sg;
@@ -341,11 +284,7 @@ struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
page = entry->pagelist[page_offset];
get_page(page);
-#if LINUX_VERSION_CODE < 0x020317
- return page_address(page);
-#else
return page;
-#endif
}
void DRM(vm_open)(struct vm_area_struct *vma)
@@ -357,10 +296,6 @@ void DRM(vm_open)(struct vm_area_struct *vma)
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
atomic_inc(&dev->vma_count);
-#if LINUX_VERSION_CODE < 0x020333
- /* The map can exist after the fd is closed. */
- MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
-#endif
vma_entry = DRM(alloc)(sizeof(*vma_entry), DRM_MEM_VMAS);
if (vma_entry) {
@@ -381,9 +316,6 @@ void DRM(vm_close)(struct vm_area_struct *vma)
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
-#if LINUX_VERSION_CODE < 0x020333
- MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
-#endif
atomic_dec(&dev->vma_count);
down(&dev->struct_sem);
@@ -422,13 +354,13 @@ int DRM(mmap_dma)(struct file *filp, struct vm_area_struct *vma)
unlock_kernel();
vma->vm_ops = &DRM(vm_dma_ops);
- vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
-#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
- /* In Linux 2.2.3 and above, this is
- handled in do_mmap() in mm/mmap.c. */
- ++filp->f_count;
+#if LINUX_VERSION_CODE <= 0x020414
+ vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
+#else
+ vma->vm_flags |= VM_RESERVED; /* Don't swap */
#endif
+
vma->vm_file = filp; /* Needed for drm_vm_open() */
DRM(vm_open)(vma);
return 0;
@@ -543,34 +475,33 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
break;
case _DRM_SHM:
vma->vm_ops = &DRM(vm_shm_ops);
-#if LINUX_VERSION_CODE >= 0x020300
vma->vm_private_data = (void *)map;
-#else
- vma->vm_pte = (unsigned long)map;
-#endif
/* Don't let this area swap. Change when
DRM_KERNEL advisory is supported. */
+#if LINUX_VERSION_CODE <= 0x020414
vma->vm_flags |= VM_LOCKED;
+#else
+ vma->vm_flags |= VM_RESERVED;
+#endif
break;
case _DRM_SCATTER_GATHER:
vma->vm_ops = &DRM(vm_sg_ops);
-#if LINUX_VERSION_CODE >= 0x020300
vma->vm_private_data = (void *)map;
+#if LINUX_VERSION_CODE <= 0x020414
+ vma->vm_flags |= VM_LOCKED;
#else
- vma->vm_pte = (unsigned long)map;
+ vma->vm_flags |= VM_RESERVED;
#endif
- vma->vm_flags |= VM_LOCKED;
break;
default:
return -EINVAL; /* This should never happen. */
}
+#if LINUX_VERSION_CODE <= 0x020414
vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
-
-#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
- /* In Linux 2.2.3 and above, this is
- handled in do_mmap() in mm/mmap.c. */
- ++filp->f_count;
+#else
+ vma->vm_flags |= VM_RESERVED; /* Don't swap */
#endif
+
vma->vm_file = filp; /* Needed for drm_vm_open() */
DRM(vm_open)(vma);
return 0;
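The repeated version checks above all make the same decision: how to keep DRM mappings away from the swapper on either side of the vm_flags change. Folded into one place, the dominant pattern is roughly the following (the helper name is invented; the SHM and scatter-gather cases above omit VM_SHM):

static inline void drm_vm_dont_swap(struct vm_area_struct *vma)
{
#if LINUX_VERSION_CODE <= 0x020414
	vma->vm_flags |= VM_LOCKED | VM_SHM;	/* older 2.4 kernels */
#else
	vma->vm_flags |= VM_RESERVED;		/* newer kernels */
#endif
}
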
diff --git a/linux/i810_dma.c b/linux/i810_dma.c
index d6933d57..ae0a3616 100644
--- a/linux/i810_dma.c
+++ b/linux/i810_dma.c
@@ -86,6 +86,7 @@ static inline void i810_print_status_page(drm_device_t *dev)
DRM_DEBUG( "hw_status: LpRing Head ptr : %x\n", temp[1]);
DRM_DEBUG( "hw_status: IRing Head ptr : %x\n", temp[2]);
DRM_DEBUG( "hw_status: Reserved : %x\n", temp[3]);
+ DRM_DEBUG( "hw_status: Last Render: %x\n", temp[4]);
DRM_DEBUG( "hw_status: Driver Counter : %d\n", temp[5]);
for(i = 6; i < dma->buf_count + 6; i++) {
DRM_DEBUG( "buffer status idx : %d used: %d\n", i - 6, temp[i]);
@@ -227,14 +228,9 @@ static int i810_unmap_buffer(drm_buf_t *buf)
#else
down_write( &current->mm->mmap_sem );
#endif
-#if LINUX_VERSION_CODE < 0x020399
- retcode = do_munmap((unsigned long)buf_priv->virtual,
- (size_t) buf->total);
-#else
retcode = do_munmap(current->mm,
(unsigned long)buf_priv->virtual,
(size_t) buf->total);
-#endif
#if LINUX_VERSION_CODE <= 0x020402
up( &current->mm->mmap_sem );
#else
@@ -476,6 +472,9 @@ static int i810_dma_initialize(drm_device_t *dev,
dev_priv->back_offset = init->back_offset;
dev_priv->depth_offset = init->depth_offset;
+ dev_priv->overlay_offset = init->overlay_offset;
+ dev_priv->overlay_physical = init->overlay_physical;
+
dev_priv->front_di1 = init->front_offset | init->pitch_bits;
dev_priv->back_di1 = init->back_offset | init->pitch_bits;
dev_priv->zi1 = init->depth_offset | init->pitch_bits;
@@ -1264,3 +1263,156 @@ int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd,
if(VM_DONTCOPY == 0) return 1;
return 0;
}
+
+static void i810_dma_dispatch_mc(drm_device_t *dev, drm_buf_t *buf, int used,
+ unsigned int last_render)
+{
+ drm_i810_private_t *dev_priv = dev->dev_private;
+ drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+ drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned long address = (unsigned long)buf->bus_address;
+ unsigned long start = address - dev->agp->base;
+ int u;
+ RING_LOCALS;
+
+ i810_kernel_lost_context(dev);
+
+ u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
+ I810_BUF_HARDWARE);
+ if(u != I810_BUF_CLIENT) {
+ DRM_DEBUG("MC found buffer that isn't mine!\n");
+ }
+
+ if (used > 4*1024)
+ used = 0;
+
+ sarea_priv->dirty = 0x7f;
+
+ DRM_DEBUG("dispatch mc addr 0x%lx, used 0x%x\n",
+ address, used);
+
+ dev_priv->counter++;
+ DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
+ DRM_DEBUG("i810_dma_dispatch_mc\n");
+ DRM_DEBUG("start : %lx\n", start);
+ DRM_DEBUG("used : %d\n", used);
+ DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
+
+ if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
+ if (used & 4) {
+ *(u32 *)((u32)buf_priv->virtual + used) = 0;
+ used += 4;
+ }
+
+ i810_unmap_buffer(buf);
+ }
+ BEGIN_LP_RING(4);
+ OUT_RING( CMD_OP_BATCH_BUFFER );
+ OUT_RING( start | BB1_PROTECTED );
+ OUT_RING( start + used - 4 );
+ OUT_RING( 0 );
+ ADVANCE_LP_RING();
+
+
+ BEGIN_LP_RING(8);
+ OUT_RING( CMD_STORE_DWORD_IDX );
+ OUT_RING( buf_priv->my_use_idx );
+ OUT_RING( I810_BUF_FREE );
+ OUT_RING( 0 );
+
+ OUT_RING( CMD_STORE_DWORD_IDX );
+ OUT_RING( 16 );
+ OUT_RING( last_render );
+ OUT_RING( 0 );
+ ADVANCE_LP_RING();
+}
+
+int i810_dma_mc(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
+ u32 *hw_status = (u32 *)dev_priv->hw_status_page;
+ drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
+ dev_priv->sarea_priv;
+ drm_i810_mc_t mc;
+
+ if (copy_from_user(&mc, (drm_i810_mc_t *)arg, sizeof(mc)))
+ return -EFAULT;
+
+
+ if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("i810_dma_mc called without lock held\n");
+ return -EINVAL;
+ }
+
+ i810_dma_dispatch_mc(dev, dma->buflist[mc.idx], mc.used,
+ mc.last_render );
+
+ atomic_add(mc.used, &dev->counts[_DRM_STAT_SECONDARY]);
+ atomic_inc(&dev->counts[_DRM_STAT_DMA]);
+ sarea_priv->last_enqueue = dev_priv->counter-1;
+ sarea_priv->last_dispatch = (int) hw_status[5];
+
+ return 0;
+}
+
+int i810_rstatus(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
+
+ return (int)(((u32 *)(dev_priv->hw_status_page))[4]);
+}
+
+int i810_ov0_info(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
+ drm_i810_overlay_t data;
+
+ data.offset = dev_priv->overlay_offset;
+ data.physical = dev_priv->overlay_physical;
+	copy_to_user((drm_i810_overlay_t *)arg, &data, sizeof(data));
+ return 0;
+}
+
+int i810_fstatus(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
+
+ if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("i810_fstatus called without lock held\n");
+ return -EINVAL;
+ }
+ return I810_READ(0x30008);
+}
+
+int i810_ov0_flip(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
+
+ if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("i810_ov0_flip called without lock held\n");
+ return -EINVAL;
+ }
+
+	/* Tell the overlay to update */
+ I810_WRITE(0x30000,dev_priv->overlay_physical | 0x80000000);
+
+ return 0;
+}
+
+
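The new MC path reuses the driver's buffer-ownership handshake: each buffer's in_use word holds one of the I810_BUF_* owner codes, and ownership moves between client, kernel and hardware via an atomic cmpxchg(), exactly as in i810_dma_dispatch_mc() above. A minimal sketch of the claim step (the function name is invented):

static int i810_claim_for_hardware(drm_i810_buf_priv_t *buf_priv)
{
	/* succeeds only if the client still owned the buffer */
	int prev = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
			   I810_BUF_HARDWARE);
	return prev == I810_BUF_CLIENT;
}
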
diff --git a/linux/i810_drm.h b/linux/i810_drm.h
index 5d47adda..8413293b 100644
--- a/linux/i810_drm.h
+++ b/linux/i810_drm.h
@@ -112,6 +112,8 @@ typedef struct _drm_i810_init {
unsigned int front_offset;
unsigned int back_offset;
unsigned int depth_offset;
+ unsigned int overlay_offset;
+ unsigned int overlay_physical;
unsigned int w;
unsigned int h;
unsigned int pitch;
@@ -196,4 +198,18 @@ typedef struct drm_i810_dma {
int granted;
} drm_i810_dma_t;
+typedef struct _drm_i810_overlay_t {
+ unsigned int offset; /* Address of the Overlay Regs */
+ unsigned int physical;
+} drm_i810_overlay_t;
+
+typedef struct _drm_i810_mc {
+ int idx; /* buffer index */
+ int used; /* nr bytes in use */
+ int num_blocks; /* number of GFXBlocks */
+ int *length; /* List of lengths for GFXBlocks (FUTURE)*/
+ unsigned int last_render; /* Last Render Request */
+} drm_i810_mc_t;
+
+
#endif /* _I810_DRM_H_ */
diff --git a/linux/i810_drv.c b/linux/i810_drv.c
index a228cb46..559b2617 100644
--- a/linux/i810_drv.c
+++ b/linux/i810_drv.c
@@ -39,10 +39,10 @@
#define DRIVER_NAME "i810"
#define DRIVER_DESC "Intel i810"
-#define DRIVER_DATE "20010616"
+#define DRIVER_DATE "20010920"
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 1
+#define DRIVER_MINOR 2
#define DRIVER_PATCHLEVEL 0
#define DRIVER_IOCTLS \
@@ -54,7 +54,12 @@
[DRM_IOCTL_NR(DRM_IOCTL_I810_GETBUF)] = { i810_getbuf, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_SWAP)] = { i810_swap_bufs, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_I810_COPY)] = { i810_copybuf, 1, 0 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_I810_DOCOPY)] = { i810_docopy, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_I810_DOCOPY)] = { i810_docopy, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I810_OV0INFO)] = { i810_ov0_info, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I810_FSTATUS)] = { i810_fstatus, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I810_OV0FLIP)] = { i810_ov0_flip, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I810_MC)] = { i810_dma_mc, 1, 1 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I810_RSTATUS)] = { i810_rstatus, 1, 0 }
#define __HAVE_COUNTERS 4
diff --git a/linux/i810_drv.h b/linux/i810_drv.h
index e1b17148..a27384d4 100644
--- a/linux/i810_drv.h
+++ b/linux/i810_drv.h
@@ -73,6 +73,8 @@ typedef struct drm_i810_private {
int back_offset;
int depth_offset;
+ int overlay_offset;
+ int overlay_physical;
int w, h;
int pitch;
} drm_i810_private_t;
@@ -94,6 +96,18 @@ extern int i810_copybuf(struct inode *inode, struct file *filp,
extern int i810_docopy(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
+extern int i810_rstatus(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int i810_ov0_info(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int i810_fstatus(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int i810_ov0_flip(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int i810_dma_mc(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+
extern void i810_dma_quiescent(drm_device_t *dev);
#define I810_VERBOSE 0
diff --git a/linux/i830.h b/linux/i830.h
new file mode 100644
index 00000000..fb7a0b32
--- /dev/null
+++ b/linux/i830.h
@@ -0,0 +1,116 @@
+/* i830.h -- Intel I830 DRM template customization -*- linux-c -*-
+ * Created: Thu Feb 15 00:01:12 2001 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Gareth Hughes <gareth@valinux.com>
+ */
+
+#ifndef __I830_H__
+#define __I830_H__
+
+/* This remains constant for all DRM template files.
+ */
+#define DRM(x) i830_##x
+
+/* General customization:
+ */
+#define __HAVE_AGP 1
+#define __MUST_HAVE_AGP 1
+#define __HAVE_MTRR 1
+#define __HAVE_CTX_BITMAP 1
+
+/* Driver customization:
+ */
+#define __HAVE_RELEASE 1
+#define DRIVER_RELEASE() do { \
+ i830_reclaim_buffers( dev, priv->pid ); \
+} while (0)
+
+/* DMA customization:
+ */
+#define __HAVE_DMA 1
+#define __HAVE_DMA_QUEUE 1
+#define __HAVE_DMA_WAITLIST 1
+#define __HAVE_DMA_RECLAIM 1
+
+#define __HAVE_DMA_QUIESCENT 1
+#define DRIVER_DMA_QUIESCENT() do { \
+ i830_dma_quiescent( dev ); \
+} while (0)
+
+#define __HAVE_DMA_IRQ 1
+#define __HAVE_DMA_IRQ_BH 1
+#define __HAVE_SHARED_IRQ 1
+#define DRIVER_PREINSTALL() do { \
+ drm_i830_private_t *dev_priv = \
+ (drm_i830_private_t *)dev->dev_private; \
+ u16 tmp; \
+ tmp = I830_READ16( I830REG_HWSTAM ); \
+ tmp = tmp & 0x6000; \
+ I830_WRITE16( I830REG_HWSTAM, tmp ); \
+ \
+ tmp = I830_READ16( I830REG_INT_MASK_R ); \
+ tmp = tmp & 0x6000; /* Unmask interrupts */ \
+ I830_WRITE16( I830REG_INT_MASK_R, tmp ); \
+ tmp = I830_READ16( I830REG_INT_ENABLE_R ); \
+ tmp = tmp & 0x6000; /* Disable all interrupts */ \
+ I830_WRITE16( I830REG_INT_ENABLE_R, tmp ); \
+} while (0)
+
+#define DRIVER_POSTINSTALL() do { \
+ drm_i830_private_t *dev_priv = \
+ (drm_i830_private_t *)dev->dev_private; \
+ u16 tmp; \
+ tmp = I830_READ16( I830REG_INT_ENABLE_R ); \
+ tmp = tmp & 0x6000; \
+ tmp = tmp | 0x0003; /* Enable bp & user interrupts */ \
+ I830_WRITE16( I830REG_INT_ENABLE_R, tmp ); \
+} while (0)
+
+#define DRIVER_UNINSTALL() do { \
+ drm_i830_private_t *dev_priv = \
+ (drm_i830_private_t *)dev->dev_private; \
+ u16 tmp; \
+ if ( dev_priv ) { \
+ tmp = I830_READ16( I830REG_INT_IDENTITY_R ); \
+ tmp = tmp & ~(0x6000); /* Clear all interrupts */ \
+ if ( tmp != 0 ) \
+ I830_WRITE16( I830REG_INT_IDENTITY_R, tmp ); \
+ \
+ tmp = I830_READ16( I830REG_INT_ENABLE_R ); \
+ tmp = tmp & 0x6000; /* Disable all interrupts */ \
+ I830_WRITE16( I830REG_INT_ENABLE_R, tmp ); \
+ } \
+} while (0)
+
+/* Buffer customization:
+ */
+
+#define DRIVER_BUF_PRIV_T drm_i830_buf_priv_t
+
+#define DRIVER_AGP_BUFFERS_MAP( dev ) \
+ ((drm_i830_private_t *)((dev)->dev_private))->buffer_map
+
+#endif
diff --git a/linux/i830_dma.c b/linux/i830_dma.c
new file mode 100644
index 00000000..661987fb
--- /dev/null
+++ b/linux/i830_dma.c
@@ -0,0 +1,1418 @@
+/* i830_dma.c -- DMA support for the I830 -*- linux-c -*-
+ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Jeff Hartmann <jhartmann@valinux.com>
+ * Keith Whitwell <keithw@valinux.com>
+ * Abraham vd Merwe <abraham@2d3d.co.za>
+ *
+ */
+
+#define __NO_VERSION__
+#include "i830.h"
+#include "drmP.h"
+#include "i830_drv.h"
+#include <linux/interrupt.h> /* For task queue support */
+
+/* in case we don't have a 2.3.99-pre6 kernel or later: */
+#ifndef VM_DONTCOPY
+#define VM_DONTCOPY 0
+#endif
+
+#define I830_BUF_FREE 2
+#define I830_BUF_CLIENT 1
+#define I830_BUF_HARDWARE 0
+
+#define I830_BUF_UNMAPPED 0
+#define I830_BUF_MAPPED 1
+
+#define RING_LOCALS unsigned int outring, ringmask; volatile char *virt;
+
+
+#define DO_IDLE_WORKAROUND() \
+do { \
+ int _head; \
+ int _tail; \
+ int _i; \
+ do { \
+ _head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR; \
+ _tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR; \
+ for(_i = 0; _i < 65535; _i++); \
+ } while(_head != _tail); \
+} while(0)
+
+#define I830_SYNC_WORKAROUND 0
+
+#define BEGIN_LP_RING(n) do { \
+ if (I830_VERBOSE) \
+ DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", \
+ n, __FUNCTION__); \
+ if (I830_SYNC_WORKAROUND) \
+ DO_IDLE_WORKAROUND(); \
+ if (dev_priv->ring.space < n*4) \
+ i830_wait_ring(dev, n*4); \
+ dev_priv->ring.space -= n*4; \
+ outring = dev_priv->ring.tail; \
+ ringmask = dev_priv->ring.tail_mask; \
+ virt = dev_priv->ring.virtual_start; \
+} while (0)
+
+#define ADVANCE_LP_RING() do { \
+ if (I830_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \
+ dev_priv->ring.tail = outring; \
+ I830_WRITE(LP_RING + RING_TAIL, outring); \
+} while(0)
+
+#define OUT_RING(n) do { \
+ if (I830_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
+ *(volatile unsigned int *)(virt + outring) = n; \
+ outring += 4; \
+ outring &= ringmask; \
+} while (0)
+
+static inline void i830_print_status_page(drm_device_t *dev)
+{
+ drm_device_dma_t *dma = dev->dma;
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ u32 *temp = (u32 *)dev_priv->hw_status_page;
+ int i;
+
+ DRM_DEBUG( "hw_status: Interrupt Status : %x\n", temp[0]);
+ DRM_DEBUG( "hw_status: LpRing Head ptr : %x\n", temp[1]);
+ DRM_DEBUG( "hw_status: IRing Head ptr : %x\n", temp[2]);
+ DRM_DEBUG( "hw_status: Reserved : %x\n", temp[3]);
+ DRM_DEBUG( "hw_status: Driver Counter : %d\n", temp[5]);
+ for(i = 9; i < dma->buf_count + 9; i++) {
+ DRM_DEBUG( "buffer status idx : %d used: %d\n", i - 9, temp[i]);
+ }
+}
+
+static drm_buf_t *i830_freelist_get(drm_device_t *dev)
+{
+ drm_device_dma_t *dma = dev->dma;
+ int i;
+ int used;
+
+ /* Linear search might not be the best solution */
+
+ for (i = 0; i < dma->buf_count; i++) {
+ drm_buf_t *buf = dma->buflist[ i ];
+ drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+ /* In use is already a pointer */
+ used = cmpxchg(buf_priv->in_use, I830_BUF_FREE,
+ I830_BUF_CLIENT);
+ if(used == I830_BUF_FREE) {
+ return buf;
+ }
+ }
+ return NULL;
+}
+
+/* This should only be called if the buffer has not been sent to the hardware
+ * yet; the hardware updates in_use for us once it's on the ring buffer.
+ */
+
+static int i830_freelist_put(drm_device_t *dev, drm_buf_t *buf)
+{
+ drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+ int used;
+
+ /* In use is already a pointer */
+ used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, I830_BUF_FREE);
+ if(used != I830_BUF_CLIENT) {
+		DRM_ERROR("Freeing buffer that's not in use: %d\n", buf->idx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct file_operations i830_buffer_fops = {
+ open: DRM(open),
+ flush: DRM(flush),
+ release: DRM(release),
+ ioctl: DRM(ioctl),
+ mmap: i830_mmap_buffers,
+ read: DRM(read),
+ fasync: DRM(fasync),
+ poll: DRM(poll),
+};
+
+int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev;
+ drm_i830_private_t *dev_priv;
+ drm_buf_t *buf;
+ drm_i830_buf_priv_t *buf_priv;
+
+ lock_kernel();
+ dev = priv->dev;
+ dev_priv = dev->dev_private;
+ buf = dev_priv->mmap_buffer;
+ buf_priv = buf->dev_private;
+
+ vma->vm_flags |= (VM_IO | VM_DONTCOPY);
+ vma->vm_file = filp;
+
+ buf_priv->currently_mapped = I830_BUF_MAPPED;
+ unlock_kernel();
+
+ if (remap_page_range(vma->vm_start,
+ VM_OFFSET(vma),
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot)) return -EAGAIN;
+ return 0;
+}
+
+static int i830_map_buffer(drm_buf_t *buf, struct file *filp)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ struct file_operations *old_fops;
+ int retcode = 0;
+
+ if(buf_priv->currently_mapped == I830_BUF_MAPPED) return -EINVAL;
+
+ if(VM_DONTCOPY != 0) {
+#if LINUX_VERSION_CODE <= 0x020402
+ down( &current->mm->mmap_sem );
+#else
+ down_write( &current->mm->mmap_sem );
+#endif
+ old_fops = filp->f_op;
+ filp->f_op = &i830_buffer_fops;
+ dev_priv->mmap_buffer = buf;
+ buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total,
+ PROT_READ|PROT_WRITE,
+ MAP_SHARED,
+ buf->bus_address);
+ dev_priv->mmap_buffer = NULL;
+ filp->f_op = old_fops;
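+		/* On failure do_mmap() returns a small negative errno cast to
+		 * unsigned long, i.e. a value within the last 1024 bytes of
+		 * the address space.
+		 */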
+ if ((unsigned long)buf_priv->virtual > -1024UL) {
+ /* Real error */
+ DRM_DEBUG("mmap error\n");
+ retcode = (signed int)buf_priv->virtual;
+ buf_priv->virtual = 0;
+ }
+#if LINUX_VERSION_CODE <= 0x020402
+ up( &current->mm->mmap_sem );
+#else
+ up_write( &current->mm->mmap_sem );
+#endif
+ } else {
+ buf_priv->virtual = buf_priv->kernel_virtual;
+ buf_priv->currently_mapped = I830_BUF_MAPPED;
+ }
+ return retcode;
+}
+
+static int i830_unmap_buffer(drm_buf_t *buf)
+{
+ drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+ int retcode = 0;
+
+ if(VM_DONTCOPY != 0) {
+ if(buf_priv->currently_mapped != I830_BUF_MAPPED)
+ return -EINVAL;
+#if LINUX_VERSION_CODE <= 0x020402
+ down( &current->mm->mmap_sem );
+#else
+ down_write( &current->mm->mmap_sem );
+#endif
+#if LINUX_VERSION_CODE < 0x020399
+ retcode = do_munmap((unsigned long)buf_priv->virtual,
+ (size_t) buf->total);
+#else
+ retcode = do_munmap(current->mm,
+ (unsigned long)buf_priv->virtual,
+ (size_t) buf->total);
+#endif
+#if LINUX_VERSION_CODE <= 0x020402
+ up( &current->mm->mmap_sem );
+#else
+ up_write( &current->mm->mmap_sem );
+#endif
+ }
+ buf_priv->currently_mapped = I830_BUF_UNMAPPED;
+ buf_priv->virtual = 0;
+
+ return retcode;
+}
+
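+/* Take a buffer off the freelist, map it into the calling process and
+ * fill in the ioctl reply with the buffer index, size and user address.
+ */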
+static int i830_dma_get_buffer(drm_device_t *dev, drm_i830_dma_t *d,
+ struct file *filp)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_buf_t *buf;
+ drm_i830_buf_priv_t *buf_priv;
+ int retcode = 0;
+
+ buf = i830_freelist_get(dev);
+ if (!buf) {
+ retcode = -ENOMEM;
+ DRM_DEBUG("retcode=%d\n", retcode);
+ return retcode;
+ }
+
+ retcode = i830_map_buffer(buf, filp);
+ if(retcode) {
+ i830_freelist_put(dev, buf);
+ DRM_DEBUG("mapbuf failed, retcode %d\n", retcode);
+ return retcode;
+ }
+ buf->pid = priv->pid;
+ buf_priv = buf->dev_private;
+ d->granted = 1;
+ d->request_idx = buf->idx;
+ d->request_size = buf->total;
+ d->virtual = buf_priv->virtual;
+
+ return retcode;
+}
+
+static unsigned long i830_alloc_page(drm_device_t *dev)
+{
+ unsigned long address;
+
+ address = __get_free_page(GFP_KERNEL);
+ if(address == 0UL)
+ return 0;
+
+ atomic_inc(&virt_to_page(address)->count);
+ set_bit(PG_locked, &virt_to_page(address)->flags);
+
+ return address;
+}
+
+static void i830_free_page(drm_device_t *dev, unsigned long page)
+{
+ if(page == 0UL)
+ return;
+
+ atomic_dec(&virt_to_page(page)->count);
+ clear_bit(PG_locked, &virt_to_page(page)->flags);
+ wake_up(&virt_to_page(page)->wait);
+ free_page(page);
+ return;
+}
+
+static int i830_dma_cleanup(drm_device_t *dev)
+{
+ drm_device_dma_t *dma = dev->dma;
+
+ if(dev->dev_private) {
+ int i;
+ drm_i830_private_t *dev_priv =
+ (drm_i830_private_t *) dev->dev_private;
+
+ if(dev_priv->ring.virtual_start) {
+ DRM(ioremapfree)((void *) dev_priv->ring.virtual_start,
+ dev_priv->ring.Size);
+ }
+ if(dev_priv->hw_status_page != 0UL) {
+ i830_free_page(dev, dev_priv->hw_status_page);
+ /* Need to rewrite hardware status page */
+ I830_WRITE(0x02080, 0x1ffff000);
+ }
+ DRM(free)(dev->dev_private, sizeof(drm_i830_private_t),
+ DRM_MEM_DRIVER);
+ dev->dev_private = NULL;
+
+ for (i = 0; i < dma->buf_count; i++) {
+ drm_buf_t *buf = dma->buflist[ i ];
+ drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+ DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total);
+ }
+ }
+ return 0;
+}
+
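+/* Busy-wait until at least n bytes of ring space are free.  The three
+ * second timeout is restarted whenever the ring head advances; if the head
+ * stops moving we report a lockup and give up.
+ */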
+static int i830_wait_ring(drm_device_t *dev, int n)
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
+ int iters = 0;
+ unsigned long end;
+ unsigned int last_head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+
+ end = jiffies + (HZ*3);
+ while (ring->space < n) {
+ int i;
+
+ ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+ ring->space = ring->head - (ring->tail+8);
+ if (ring->space < 0) ring->space += ring->Size;
+
+ if (ring->head != last_head) {
+ end = jiffies + (HZ*3);
+ last_head = ring->head;
+ }
+
+ iters++;
+ if((signed)(end - jiffies) <= 0) {
+ DRM_ERROR("space: %d wanted %d\n", ring->space, n);
+ DRM_ERROR("lockup\n");
+ goto out_wait_ring;
+ }
+
+ for (i = 0 ; i < 2000 ; i++) ;
+ }
+
+out_wait_ring:
+ return iters;
+}
+
+static void i830_kernel_lost_context(drm_device_t *dev)
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
+
+ ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+ ring->tail = I830_READ(LP_RING + RING_TAIL);
+ ring->space = ring->head - (ring->tail+8);
+ if (ring->space < 0) ring->space += ring->Size;
+}
+
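+/* The per-buffer in_use flags live in the hardware status page, one dword
+ * per buffer starting at byte offset 36 (dword index 9, matching
+ * i830_print_status_page).
+ */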
+static int i830_freelist_init(drm_device_t *dev, drm_i830_private_t *dev_priv)
+{
+ drm_device_dma_t *dma = dev->dma;
+ int my_idx = 36;
+ u32 *hw_status = (u32 *)(dev_priv->hw_status_page + my_idx);
+ int i;
+
+ if(dma->buf_count > 1019) {
+ /* Not enough space in the status page for the freelist */
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dma->buf_count; i++) {
+ drm_buf_t *buf = dma->buflist[ i ];
+ drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+
+ buf_priv->in_use = hw_status++;
+ buf_priv->my_use_idx = my_idx;
+ my_idx += 4;
+
+ *buf_priv->in_use = I830_BUF_FREE;
+
+ buf_priv->kernel_virtual = DRM(ioremap)(buf->bus_address,
+ buf->total);
+ }
+ return 0;
+}
+
+static int i830_dma_initialize(drm_device_t *dev,
+ drm_i830_private_t *dev_priv,
+ drm_i830_init_t *init)
+{
+ struct list_head *list;
+
+ memset(dev_priv, 0, sizeof(drm_i830_private_t));
+
+ list_for_each(list, &dev->maplist->head) {
+ drm_map_list_t *r_list = (drm_map_list_t *)list;
+ if( r_list->map &&
+ r_list->map->type == _DRM_SHM &&
+ r_list->map->flags & _DRM_CONTAINS_LOCK ) {
+ dev_priv->sarea_map = r_list->map;
+ break;
+ }
+ }
+
+ if(!dev_priv->sarea_map) {
+ dev->dev_private = (void *)dev_priv;
+ i830_dma_cleanup(dev);
+ DRM_ERROR("can not find sarea!\n");
+ return -EINVAL;
+ }
+ DRM_FIND_MAP( dev_priv->mmio_map, init->mmio_offset );
+ if(!dev_priv->mmio_map) {
+ dev->dev_private = (void *)dev_priv;
+ i830_dma_cleanup(dev);
+ DRM_ERROR("can not find mmio map!\n");
+ return -EINVAL;
+ }
+ DRM_FIND_MAP( dev_priv->buffer_map, init->buffers_offset );
+ if(!dev_priv->buffer_map) {
+ dev->dev_private = (void *)dev_priv;
+ i830_dma_cleanup(dev);
+ DRM_ERROR("can not find dma buffer map!\n");
+ return -EINVAL;
+ }
+
+ dev_priv->sarea_priv = (drm_i830_sarea_t *)
+ ((u8 *)dev_priv->sarea_map->handle +
+ init->sarea_priv_offset);
+
+ atomic_set(&dev_priv->flush_done, 0);
+ init_waitqueue_head(&dev_priv->flush_queue);
+
+ dev_priv->ring.Start = init->ring_start;
+ dev_priv->ring.End = init->ring_end;
+ dev_priv->ring.Size = init->ring_size;
+
+ dev_priv->ring.virtual_start = DRM(ioremap)(dev->agp->base +
+ init->ring_start,
+ init->ring_size);
+
+ if (dev_priv->ring.virtual_start == NULL) {
+ dev->dev_private = (void *) dev_priv;
+ i830_dma_cleanup(dev);
+ DRM_ERROR("can not ioremap virtual address for"
+ " ring buffer\n");
+ return -ENOMEM;
+ }
+
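+	/* The ring size must be a power of two for tail_mask to work. */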
+ dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
+
+ dev_priv->w = init->w;
+ dev_priv->h = init->h;
+ dev_priv->pitch = init->pitch;
+ dev_priv->back_offset = init->back_offset;
+ dev_priv->depth_offset = init->depth_offset;
+
+ dev_priv->front_di1 = init->front_offset | init->pitch_bits;
+ dev_priv->back_di1 = init->back_offset | init->pitch_bits;
+ dev_priv->zi1 = init->depth_offset | init->pitch_bits;
+
+ dev_priv->cpp = init->cpp;
+	/* We are using separate values as placeholders for mechanisms
+	 * supporting private backbuffer/depthbuffer usage.
+ */
+
+ dev_priv->back_pitch = init->back_pitch;
+ dev_priv->depth_pitch = init->depth_pitch;
+
+ /* Program Hardware Status Page */
+ dev_priv->hw_status_page = i830_alloc_page(dev);
+ if(dev_priv->hw_status_page == 0UL) {
+ dev->dev_private = (void *)dev_priv;
+ i830_dma_cleanup(dev);
+ DRM_ERROR("Can not allocate hardware status page\n");
+ return -ENOMEM;
+ }
+ memset((void *) dev_priv->hw_status_page, 0, PAGE_SIZE);
+ DRM_DEBUG("hw status page @ %lx\n", dev_priv->hw_status_page);
+
+ I830_WRITE(0x02080, virt_to_bus((void *)dev_priv->hw_status_page));
+ DRM_DEBUG("Enabled hardware status page\n");
+
+ /* Now we need to init our freelist */
+ if(i830_freelist_init(dev, dev_priv) != 0) {
+ dev->dev_private = (void *)dev_priv;
+ i830_dma_cleanup(dev);
+ DRM_ERROR("Not enough space in the status page for"
+ " the freelist\n");
+ return -ENOMEM;
+ }
+ dev->dev_private = (void *)dev_priv;
+
+ return 0;
+}
+
+int i830_dma_init(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_i830_private_t *dev_priv;
+ drm_i830_init_t init;
+ int retcode = 0;
+
+ if (copy_from_user(&init, (drm_i830_init_t *)arg, sizeof(init)))
+ return -EFAULT;
+
+ switch(init.func) {
+ case I830_INIT_DMA:
+ dev_priv = DRM(alloc)(sizeof(drm_i830_private_t),
+ DRM_MEM_DRIVER);
+ if(dev_priv == NULL) return -ENOMEM;
+ retcode = i830_dma_initialize(dev, dev_priv, &init);
+ break;
+ case I830_CLEANUP_DMA:
+ retcode = i830_dma_cleanup(dev);
+ break;
+ default:
+ retcode = -EINVAL;
+ break;
+ }
+
+ return retcode;
+}
+
+/* The most efficient way to verify state for the i830 is as it is
+ * emitted. Non-conformant state is silently dropped.
+ *
+ * Use 'volatile' & local var tmp to force the emitted values to be
+ * identical to the verified ones.
+ */
+static void i830EmitContextVerified( drm_device_t *dev,
+ volatile unsigned int *code )
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ int i, j = 0;
+ unsigned int tmp;
+ RING_LOCALS;
+
+ BEGIN_LP_RING( I830_CTX_SETUP_SIZE );
+ for ( i = 0 ; i < I830_CTX_SETUP_SIZE ; i++ ) {
+ tmp = code[i];
+
+#if 0
+ if ((tmp & (7<<29)) == (3<<29) &&
+ (tmp & (0x1f<<24)) < (0x1d<<24)) {
+ OUT_RING( tmp );
+ j++;
+ } else {
+ printk("Skipping %d\n", i);
+ }
+#else
+ OUT_RING( tmp );
+ j++;
+#endif
+ }
+
+ if (j & 1)
+ OUT_RING( 0 );
+
+ ADVANCE_LP_RING();
+}
+
+static void i830EmitTexVerified( drm_device_t *dev,
+ volatile unsigned int *code )
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ int i, j = 0;
+ unsigned int tmp;
+ RING_LOCALS;
+
+ BEGIN_LP_RING( I830_TEX_SETUP_SIZE );
+
+ OUT_RING( GFX_OP_MAP_INFO );
+ OUT_RING( code[I830_TEXREG_MI1] );
+ OUT_RING( code[I830_TEXREG_MI2] );
+ OUT_RING( code[I830_TEXREG_MI3] );
+ OUT_RING( code[I830_TEXREG_MI4] );
+ OUT_RING( code[I830_TEXREG_MI5] );
+
+ for ( i = 6 ; i < I830_TEX_SETUP_SIZE ; i++ ) {
+ tmp = code[i];
+ OUT_RING( tmp );
+ j++;
+ }
+
+ if (j & 1)
+ OUT_RING( 0 );
+
+ ADVANCE_LP_RING();
+}
+
+static void i830EmitTexBlendVerified( drm_device_t *dev,
+ volatile unsigned int *code,
+ volatile unsigned int num)
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ int i, j = 0;
+ unsigned int tmp;
+ RING_LOCALS;
+
+ BEGIN_LP_RING( num );
+
+ for ( i = 0 ; i < num ; i++ ) {
+ tmp = code[i];
+ OUT_RING( tmp );
+ j++;
+ }
+
+ if (j & 1)
+ OUT_RING( 0 );
+
+ ADVANCE_LP_RING();
+}
+
+static void i830EmitTexPalette( drm_device_t *dev,
+ unsigned int *palette,
+ int number,
+ int is_shared )
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ int i;
+ RING_LOCALS;
+
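+	/* One palette load command, 256 entries and a pad dword: 258 in all. */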
+ BEGIN_LP_RING( 258 );
+
+ if(is_shared == 1) {
+ OUT_RING(CMD_OP_MAP_PALETTE_LOAD |
+ MAP_PALETTE_NUM(0) |
+ MAP_PALETTE_BOTH);
+ } else {
+ OUT_RING(CMD_OP_MAP_PALETTE_LOAD | MAP_PALETTE_NUM(number));
+ }
+ for(i = 0; i < 256; i++) {
+ OUT_RING(palette[i]);
+ }
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+}
+
+/* Need to do some additional checking when setting the dest buffer.
+ */
+static void i830EmitDestVerified( drm_device_t *dev,
+ volatile unsigned int *code )
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ unsigned int tmp;
+ RING_LOCALS;
+
+ BEGIN_LP_RING( I830_DEST_SETUP_SIZE + 6 );
+
+ tmp = code[I830_DESTREG_CBUFADDR];
+ if (tmp == dev_priv->front_di1) {
+ /* Don't use fence when front buffer rendering */
+ OUT_RING( CMD_OP_DESTBUFFER_INFO );
+ OUT_RING( BUF_3D_ID_COLOR_BACK |
+ BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) );
+ OUT_RING( tmp );
+
+ OUT_RING( CMD_OP_DESTBUFFER_INFO );
+ OUT_RING( BUF_3D_ID_DEPTH |
+ BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp));
+ OUT_RING( dev_priv->zi1 );
+ } else if(tmp == dev_priv->back_di1) {
+ OUT_RING( CMD_OP_DESTBUFFER_INFO );
+ OUT_RING( BUF_3D_ID_COLOR_BACK |
+ BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) |
+ BUF_3D_USE_FENCE);
+ OUT_RING( tmp );
+
+ OUT_RING( CMD_OP_DESTBUFFER_INFO );
+ OUT_RING( BUF_3D_ID_DEPTH | BUF_3D_USE_FENCE |
+ BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp));
+ OUT_RING( dev_priv->zi1 );
+ } else {
+ DRM_DEBUG("bad di1 %x (allow %x or %x)\n",
+ tmp, dev_priv->front_di1, dev_priv->back_di1);
+ }
+
+	/* invariant:
+ */
+
+
+ OUT_RING( GFX_OP_DESTBUFFER_VARS );
+ OUT_RING( code[I830_DESTREG_DV1] );
+
+ OUT_RING( GFX_OP_DRAWRECT_INFO );
+ OUT_RING( code[I830_DESTREG_DR1] );
+ OUT_RING( code[I830_DESTREG_DR2] );
+ OUT_RING( code[I830_DESTREG_DR3] );
+ OUT_RING( code[I830_DESTREG_DR4] );
+
+ /* Need to verify this */
+ tmp = code[I830_DESTREG_SENABLE];
+ if((tmp & ~0x3) == GFX_OP_SCISSOR_ENABLE) {
+ OUT_RING( tmp );
+ } else {
+ DRM_DEBUG("bad scissor enable\n");
+ OUT_RING( 0 );
+ }
+
+ OUT_RING( code[I830_DESTREG_SENABLE] );
+
+ OUT_RING( GFX_OP_SCISSOR_RECT );
+ OUT_RING( code[I830_DESTREG_SR1] );
+ OUT_RING( code[I830_DESTREG_SR2] );
+
+ ADVANCE_LP_RING();
+}
+
+static void i830EmitState( drm_device_t *dev )
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned int dirty = sarea_priv->dirty;
+
+ if (dirty & I830_UPLOAD_BUFFERS) {
+ i830EmitDestVerified( dev, sarea_priv->BufferState );
+ sarea_priv->dirty &= ~I830_UPLOAD_BUFFERS;
+ }
+
+ if (dirty & I830_UPLOAD_CTX) {
+ i830EmitContextVerified( dev, sarea_priv->ContextState );
+ sarea_priv->dirty &= ~I830_UPLOAD_CTX;
+ }
+
+ if (dirty & I830_UPLOAD_TEX0) {
+ i830EmitTexVerified( dev, sarea_priv->TexState[0] );
+ sarea_priv->dirty &= ~I830_UPLOAD_TEX0;
+ }
+
+ if (dirty & I830_UPLOAD_TEX1) {
+ i830EmitTexVerified( dev, sarea_priv->TexState[1] );
+ sarea_priv->dirty &= ~I830_UPLOAD_TEX1;
+ }
+
+ if (dirty & I830_UPLOAD_TEXBLEND0) {
+ i830EmitTexBlendVerified( dev, sarea_priv->TexBlendState[0],
+ sarea_priv->TexBlendStateWordsUsed[0]);
+ sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND0;
+ }
+
+ if (dirty & I830_UPLOAD_TEXBLEND1) {
+ i830EmitTexBlendVerified( dev, sarea_priv->TexBlendState[1],
+ sarea_priv->TexBlendStateWordsUsed[1]);
+ sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND1;
+ }
+
+ if (dirty & I830_UPLOAD_TEX_PALETTE_SHARED) {
+ i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1);
+ } else {
+ if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) {
+ i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0);
+ sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0);
+ }
+ if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) {
+ i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0);
+ sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1);
+ }
+ }
+}
+
+static void i830_dma_dispatch_clear( drm_device_t *dev, int flags,
+ unsigned int clear_color,
+ unsigned int clear_zval,
+ unsigned int clear_depthmask)
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ int nbox = sarea_priv->nbox;
+ drm_clip_rect_t *pbox = sarea_priv->boxes;
+ int pitch = dev_priv->pitch;
+ int cpp = dev_priv->cpp;
+ int i;
+ unsigned int BR13, CMD, D_CMD;
+ RING_LOCALS;
+
+ i830_kernel_lost_context(dev);
+
+ switch(cpp) {
+ case 2:
+ BR13 = (0xF0 << 16) | (pitch * cpp) | (1<<24);
+ D_CMD = CMD = XY_COLOR_BLT_CMD;
+ break;
+ case 4:
+ BR13 = (0xF0 << 16) | (pitch * cpp) | (1<<24) | (1<<25);
+ CMD = (XY_COLOR_BLT_CMD | XY_COLOR_BLT_WRITE_ALPHA |
+ XY_COLOR_BLT_WRITE_RGB);
+ D_CMD = XY_COLOR_BLT_CMD;
+ if(clear_depthmask & 0x00ffffff)
+ D_CMD |= XY_COLOR_BLT_WRITE_RGB;
+ if(clear_depthmask & 0xff000000)
+ D_CMD |= XY_COLOR_BLT_WRITE_ALPHA;
+ break;
+ default:
+ BR13 = (0xF0 << 16) | (pitch * cpp) | (1<<24);
+ D_CMD = CMD = XY_COLOR_BLT_CMD;
+ break;
+ }
+
+ if (nbox > I830_NR_SAREA_CLIPRECTS)
+ nbox = I830_NR_SAREA_CLIPRECTS;
+
+ for (i = 0 ; i < nbox ; i++, pbox++) {
+ if (pbox->x1 > pbox->x2 ||
+ pbox->y1 > pbox->y2 ||
+ pbox->x2 > dev_priv->w ||
+ pbox->y2 > dev_priv->h)
+ continue;
+
+ if ( flags & I830_FRONT ) {
+ DRM_DEBUG("clear front\n");
+ BEGIN_LP_RING( 6 );
+ OUT_RING( CMD );
+ OUT_RING( BR13 );
+ OUT_RING( (pbox->y1 << 16) | pbox->x1 );
+ OUT_RING( (pbox->y2 << 16) | pbox->x2 );
+ OUT_RING( 0 );
+ OUT_RING( clear_color );
+ ADVANCE_LP_RING();
+ }
+
+ if ( flags & I830_BACK ) {
+ DRM_DEBUG("clear back\n");
+ BEGIN_LP_RING( 6 );
+ OUT_RING( CMD );
+ OUT_RING( BR13 );
+ OUT_RING( (pbox->y1 << 16) | pbox->x1 );
+ OUT_RING( (pbox->y2 << 16) | pbox->x2 );
+ OUT_RING( dev_priv->back_offset );
+ OUT_RING( clear_color );
+ ADVANCE_LP_RING();
+ }
+
+ if ( flags & I830_DEPTH ) {
+ DRM_DEBUG("clear depth\n");
+ BEGIN_LP_RING( 6 );
+ OUT_RING( D_CMD );
+ OUT_RING( BR13 );
+ OUT_RING( (pbox->y1 << 16) | pbox->x1 );
+ OUT_RING( (pbox->y2 << 16) | pbox->x2 );
+ OUT_RING( dev_priv->depth_offset );
+ OUT_RING( clear_zval );
+ ADVANCE_LP_RING();
+ }
+ }
+}
+
+static void i830_dma_dispatch_swap( drm_device_t *dev )
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ int nbox = sarea_priv->nbox;
+ drm_clip_rect_t *pbox = sarea_priv->boxes;
+ int pitch = dev_priv->pitch;
+ int cpp = dev_priv->cpp;
+ int ofs = dev_priv->back_offset;
+ int i;
+ unsigned int CMD, BR13;
+ RING_LOCALS;
+
+ DRM_DEBUG("swapbuffers\n");
+
+ switch(cpp) {
+ case 2:
+ BR13 = (pitch * cpp) | (0xCC << 16) | (1<<24);
+ CMD = XY_SRC_COPY_BLT_CMD;
+ break;
+ case 4:
+ BR13 = (pitch * cpp) | (0xCC << 16) | (1<<24) | (1<<25);
+ CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
+ XY_SRC_COPY_BLT_WRITE_RGB);
+ break;
+ default:
+ BR13 = (pitch * cpp) | (0xCC << 16) | (1<<24);
+ CMD = XY_SRC_COPY_BLT_CMD;
+ break;
+ }
+
+ i830_kernel_lost_context(dev);
+
+ if (nbox > I830_NR_SAREA_CLIPRECTS)
+ nbox = I830_NR_SAREA_CLIPRECTS;
+
+ for (i = 0 ; i < nbox; i++, pbox++)
+ {
+ if (pbox->x1 > pbox->x2 ||
+ pbox->y1 > pbox->y2 ||
+ pbox->x2 > dev_priv->w ||
+ pbox->y2 > dev_priv->h)
+ continue;
+
+ DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n",
+ pbox->x1, pbox->y1,
+ pbox->x2, pbox->y2);
+
+ BEGIN_LP_RING( 8 );
+ OUT_RING( CMD );
+ OUT_RING( BR13 );
+
+ OUT_RING( (pbox->y1 << 16) |
+ pbox->x1 );
+ OUT_RING( (pbox->y2 << 16) |
+ pbox->x2 );
+
+ OUT_RING( 0 /* front ofs always zero */ );
+ OUT_RING( (pbox->y1 << 16) |
+ pbox->x1 );
+
+ OUT_RING( BR13 & 0xffff );
+ OUT_RING( ofs );
+
+ ADVANCE_LP_RING();
+ }
+}
+
+
+static void i830_dma_dispatch_vertex(drm_device_t *dev,
+ drm_buf_t *buf,
+ int discard,
+ int used)
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+ drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_clip_rect_t *box = sarea_priv->boxes;
+ int nbox = sarea_priv->nbox;
+ unsigned long address = (unsigned long)buf->bus_address;
+ unsigned long start = address - dev->agp->base;
+ int i = 0, u;
+ RING_LOCALS;
+
+ i830_kernel_lost_context(dev);
+
+ if (nbox > I830_NR_SAREA_CLIPRECTS)
+ nbox = I830_NR_SAREA_CLIPRECTS;
+
+ if (discard) {
+ u = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
+ I830_BUF_HARDWARE);
+ if(u != I830_BUF_CLIENT) {
+ DRM_DEBUG("xxxx 2\n");
+ }
+ }
+
+ if (used > 4*1024)
+ used = 0;
+
+ if (sarea_priv->dirty)
+ i830EmitState( dev );
+
+ DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n",
+ address, used, nbox);
+
+ dev_priv->counter++;
+ DRM_DEBUG( "dispatch counter : %ld\n", dev_priv->counter);
+ DRM_DEBUG( "i830_dma_dispatch\n");
+ DRM_DEBUG( "start : %lx\n", start);
+ DRM_DEBUG( "used : %d\n", used);
+ DRM_DEBUG( "start + used - 4 : %ld\n", start + used - 4);
+
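+	/* If the client still has the buffer mapped, write the
+	 * GFX_OP_PRIMITIVE command header into the first dword, pad the
+	 * buffer to a quadword boundary and unmap it before dispatch.
+	 */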
+ if (buf_priv->currently_mapped == I830_BUF_MAPPED) {
+ *(u32 *)buf_priv->virtual = (GFX_OP_PRIMITIVE |
+ sarea_priv->vertex_prim |
+ ((used/4)-2));
+
+ if (used & 4) {
+ *(u32 *)((u32)buf_priv->virtual + used) = 0;
+ used += 4;
+ }
+
+ i830_unmap_buffer(buf);
+ }
+
+ if (used) {
+ do {
+ if (i < nbox) {
+ BEGIN_LP_RING(6);
+ OUT_RING( GFX_OP_DRAWRECT_INFO );
+ OUT_RING( sarea_priv->BufferState[I830_DESTREG_DR1] );
+ OUT_RING( box[i].x1 | (box[i].y1<<16) );
+ OUT_RING( box[i].x2 | (box[i].y2<<16) );
+ OUT_RING( sarea_priv->BufferState[I830_DESTREG_DR4] );
+ OUT_RING( 0 );
+ ADVANCE_LP_RING();
+ }
+
+ BEGIN_LP_RING(4);
+
+ OUT_RING( MI_BATCH_BUFFER );
+ OUT_RING( start | MI_BATCH_NON_SECURE );
+ OUT_RING( start + used - 4 );
+ OUT_RING( 0 );
+ ADVANCE_LP_RING();
+
+ } while (++i < nbox);
+ }
+
+ BEGIN_LP_RING(10);
+ OUT_RING( CMD_STORE_DWORD_IDX );
+ OUT_RING( 20 );
+ OUT_RING( dev_priv->counter );
+ OUT_RING( 0 );
+
+ if (discard) {
+ OUT_RING( CMD_STORE_DWORD_IDX );
+ OUT_RING( buf_priv->my_use_idx );
+ OUT_RING( I830_BUF_FREE );
+ OUT_RING( 0 );
+ }
+
+ OUT_RING( CMD_REPORT_HEAD );
+ OUT_RING( 0 );
+ ADVANCE_LP_RING();
+}
+
+/* Interrupts are only for flushing */
+void i830_dma_service(int irq, void *device, struct pt_regs *regs)
+{
+ drm_device_t *dev = (drm_device_t *)device;
+ drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
+ u16 temp;
+
+ temp = I830_READ16(I830REG_INT_IDENTITY_R);
+ temp = temp & ~(0x6000);
+ if(temp != 0) I830_WRITE16(I830REG_INT_IDENTITY_R,
+ temp); /* Clear all interrupts */
+ else
+ return;
+
+ queue_task(&dev->tq, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+}
+
+void DRM(dma_immediate_bh)(void *device)
+{
+ drm_device_t *dev = (drm_device_t *) device;
+ drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
+
+ atomic_set(&dev_priv->flush_done, 1);
+ wake_up_interruptible(&dev_priv->flush_queue);
+}
+
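+/* Ask the hardware to report its head pointer and raise a user interrupt,
+ * then wait for the ring to drain before flagging the flush as done and
+ * waking any waiters.
+ */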
+static inline void i830_dma_emit_flush(drm_device_t *dev)
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ RING_LOCALS;
+
+ i830_kernel_lost_context(dev);
+
+ BEGIN_LP_RING(2);
+ OUT_RING( CMD_REPORT_HEAD );
+ OUT_RING( GFX_OP_USER_INTERRUPT );
+ ADVANCE_LP_RING();
+
+ i830_wait_ring( dev, dev_priv->ring.Size - 8 );
+ atomic_set(&dev_priv->flush_done, 1);
+ wake_up_interruptible(&dev_priv->flush_queue);
+}
+
+static inline void i830_dma_quiescent_emit(drm_device_t *dev)
+{
+ drm_i830_private_t *dev_priv = dev->dev_private;
+ RING_LOCALS;
+
+ i830_kernel_lost_context(dev);
+
+ BEGIN_LP_RING(4);
+ OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE );
+ OUT_RING( CMD_REPORT_HEAD );
+ OUT_RING( 0 );
+ OUT_RING( GFX_OP_USER_INTERRUPT );
+ ADVANCE_LP_RING();
+
+ i830_wait_ring( dev, dev_priv->ring.Size - 8 );
+ atomic_set(&dev_priv->flush_done, 1);
+ wake_up_interruptible(&dev_priv->flush_queue);
+}
+
+void i830_dma_quiescent(drm_device_t *dev)
+{
+ DECLARE_WAITQUEUE(entry, current);
+ drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
+ unsigned long end;
+
+ if(dev_priv == NULL) {
+ return;
+ }
+ atomic_set(&dev_priv->flush_done, 0);
+ add_wait_queue(&dev_priv->flush_queue, &entry);
+ end = jiffies + (HZ*3);
+
+ for (;;) {
+ current->state = TASK_INTERRUPTIBLE;
+ i830_dma_quiescent_emit(dev);
+ if (atomic_read(&dev_priv->flush_done) == 1) break;
+ if((signed)(end - jiffies) <= 0) {
+ DRM_ERROR("lockup\n");
+ break;
+ }
+ schedule_timeout(HZ*3);
+ if (signal_pending(current)) {
+ break;
+ }
+ }
+
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&dev_priv->flush_queue, &entry);
+
+ return;
+}
+
+static int i830_flush_queue(drm_device_t *dev)
+{
+ DECLARE_WAITQUEUE(entry, current);
+ drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+ unsigned long end;
+ int i, ret = 0;
+
+ if(dev_priv == NULL) {
+ return 0;
+ }
+ atomic_set(&dev_priv->flush_done, 0);
+ add_wait_queue(&dev_priv->flush_queue, &entry);
+ end = jiffies + (HZ*3);
+ for (;;) {
+ current->state = TASK_INTERRUPTIBLE;
+ i830_dma_emit_flush(dev);
+ if (atomic_read(&dev_priv->flush_done) == 1) break;
+ if((signed)(end - jiffies) <= 0) {
+ DRM_ERROR("lockup\n");
+ break;
+ }
+ schedule_timeout(HZ*3);
+ if (signal_pending(current)) {
+ ret = -EINTR; /* Can't restart */
+ break;
+ }
+ }
+
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&dev_priv->flush_queue, &entry);
+
+
+ for (i = 0; i < dma->buf_count; i++) {
+ drm_buf_t *buf = dma->buflist[ i ];
+ drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+
+ int used = cmpxchg(buf_priv->in_use, I830_BUF_HARDWARE,
+ I830_BUF_FREE);
+
+ if (used == I830_BUF_HARDWARE)
+ DRM_DEBUG("reclaimed from HARDWARE\n");
+ if (used == I830_BUF_CLIENT)
+			DRM_DEBUG("still owned by client\n");
+ }
+
+ return ret;
+}
+
+/* Must be called with the lock held */
+void i830_reclaim_buffers(drm_device_t *dev, pid_t pid)
+{
+ drm_device_dma_t *dma = dev->dma;
+ int i;
+
+ if (!dma) return;
+ if (!dev->dev_private) return;
+ if (!dma->buflist) return;
+
+ i830_flush_queue(dev);
+
+ for (i = 0; i < dma->buf_count; i++) {
+ drm_buf_t *buf = dma->buflist[ i ];
+ drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+
+ if (buf->pid == pid && buf_priv) {
+ int used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
+ I830_BUF_FREE);
+
+ if (used == I830_BUF_CLIENT)
+ DRM_DEBUG("reclaimed from client\n");
+ if(buf_priv->currently_mapped == I830_BUF_MAPPED)
+ buf_priv->currently_mapped = I830_BUF_UNMAPPED;
+ }
+ }
+}
+
+int i830_flush_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+
+ DRM_DEBUG("i830_flush_ioctl\n");
+ if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("i830_flush_ioctl called without lock held\n");
+ return -EINVAL;
+ }
+
+ i830_flush_queue(dev);
+ return 0;
+}
+
+int i830_dma_vertex(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
+ u32 *hw_status = (u32 *)dev_priv->hw_status_page;
+ drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
+ dev_priv->sarea_priv;
+ drm_i830_vertex_t vertex;
+
+ if (copy_from_user(&vertex, (drm_i830_vertex_t *)arg, sizeof(vertex)))
+ return -EFAULT;
+
+ if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("i830_dma_vertex called without lock held\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("i830 dma vertex, idx %d used %d discard %d\n",
+ vertex.idx, vertex.used, vertex.discard);
+
+ if(vertex.idx < 0 || vertex.idx > dma->buf_count) return -EINVAL;
+
+ i830_dma_dispatch_vertex( dev,
+ dma->buflist[ vertex.idx ],
+ vertex.discard, vertex.used );
+
+ sarea_priv->last_enqueue = dev_priv->counter-1;
+ sarea_priv->last_dispatch = (int) hw_status[5];
+
+ return 0;
+}
+
+int i830_clear_bufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_i830_clear_t clear;
+
+ if (copy_from_user(&clear, (drm_i830_clear_t *)arg, sizeof(clear)))
+ return -EFAULT;
+
+ if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("i830_clear_bufs called without lock held\n");
+ return -EINVAL;
+ }
+
+ /* GH: Someone's doing nasty things... */
+ if (!dev->dev_private) {
+ return -EINVAL;
+ }
+
+ i830_dma_dispatch_clear( dev, clear.flags,
+ clear.clear_color,
+ clear.clear_depth,
+ clear.clear_depthmask);
+ return 0;
+}
+
+int i830_swap_bufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+
+ DRM_DEBUG("i830_swap_bufs\n");
+
+ if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("i830_swap_buf called without lock held\n");
+ return -EINVAL;
+ }
+
+ i830_dma_dispatch_swap( dev );
+ return 0;
+}
+
+int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
+ u32 *hw_status = (u32 *)dev_priv->hw_status_page;
+ drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
+ dev_priv->sarea_priv;
+
+ sarea_priv->last_dispatch = (int) hw_status[5];
+ return 0;
+}
+
+int i830_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ int retcode = 0;
+ drm_i830_dma_t d;
+ drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
+ u32 *hw_status = (u32 *)dev_priv->hw_status_page;
+ drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
+ dev_priv->sarea_priv;
+
+ DRM_DEBUG("getbuf\n");
+ if (copy_from_user(&d, (drm_i830_dma_t *)arg, sizeof(d)))
+ return -EFAULT;
+
+ if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("i830_dma called without lock held\n");
+ return -EINVAL;
+ }
+
+ d.granted = 0;
+
+ retcode = i830_dma_get_buffer(dev, &d, filp);
+
+ DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n",
+ current->pid, retcode, d.granted);
+
+ if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d)))
+ return -EFAULT;
+ sarea_priv->last_dispatch = (int) hw_status[5];
+
+ return retcode;
+}
+
+int i830_copybuf(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_i830_copy_t d;
+ drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
+ u32 *hw_status = (u32 *)dev_priv->hw_status_page;
+ drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
+ dev_priv->sarea_priv;
+ drm_buf_t *buf;
+ drm_i830_buf_priv_t *buf_priv;
+ drm_device_dma_t *dma = dev->dma;
+
+ if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("i830_dma called without lock held\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&d, (drm_i830_copy_t *)arg, sizeof(d)))
+ return -EFAULT;
+
+ if(d.idx < 0 || d.idx > dma->buf_count) return -EINVAL;
+ buf = dma->buflist[ d.idx ];
+ buf_priv = buf->dev_private;
+ if (buf_priv->currently_mapped != I830_BUF_MAPPED) return -EPERM;
+
+ if(d.used < 0 || d.used > buf->total) return -EINVAL;
+
+ if (copy_from_user(buf_priv->virtual, d.address, d.used))
+ return -EFAULT;
+
+ sarea_priv->last_dispatch = (int) hw_status[5];
+
+ return 0;
+}
+
+int i830_docopy(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ if(VM_DONTCOPY == 0) return 1;
+ return 0;
+}
diff --git a/linux/i830_drm.h b/linux/i830_drm.h
new file mode 100644
index 00000000..e4a2a257
--- /dev/null
+++ b/linux/i830_drm.h
@@ -0,0 +1,238 @@
+#ifndef _I830_DRM_H_
+#define _I830_DRM_H_
+
+/* WARNING: These defines must be the same as what the Xserver uses.
+ * If you change them, you must change the defines in the Xserver.
+ */
+
+#ifndef _I830_DEFINES_
+#define _I830_DEFINES_
+
+#define I830_DMA_BUF_ORDER 12
+#define I830_DMA_BUF_SZ (1<<I830_DMA_BUF_ORDER)
+#define I830_DMA_BUF_NR 256
+#define I830_NR_SAREA_CLIPRECTS 8
+
+/* Each region is a minimum of 64k, and there are at most 64 of them.
+ */
+#define I830_NR_TEX_REGIONS 64
+#define I830_LOG_MIN_TEX_REGION_SIZE 16
+
+/* if defining I830_ENABLE_4_TEXTURES, do it in i830_3d_reg.h, too */
+#if !defined(I830_ENABLE_4_TEXTURES)
+#define I830_TEXTURE_COUNT 2
+#define I830_TEXBLEND_COUNT 2 /* always same as TEXTURE_COUNT? */
+#else /* defined(I830_ENABLE_4_TEXTURES) */
+#define I830_TEXTURE_COUNT 4
+#define I830_TEXBLEND_COUNT 4 /* always same as TEXTURE_COUNT? */
+#endif /* I830_ENABLE_4_TEXTURES */
+
+#define I830_TEXBLEND_SIZE 12 /* (4 args + op) * 2 + COLOR_FACTOR */
+
+#define I830_UPLOAD_CTX 0x1
+#define I830_UPLOAD_BUFFERS 0x2
+#define I830_UPLOAD_CLIPRECTS 0x4
+#define I830_UPLOAD_TEX0_IMAGE 0x100 /* handled clientside */
+#define I830_UPLOAD_TEX0_CUBE 0x200 /* handled clientside */
+#define I830_UPLOAD_TEX1_IMAGE 0x400 /* handled clientside */
+#define I830_UPLOAD_TEX1_CUBE 0x800 /* handled clientside */
+#define I830_UPLOAD_TEX2_IMAGE 0x1000 /* handled clientside */
+#define I830_UPLOAD_TEX2_CUBE 0x2000 /* handled clientside */
+#define I830_UPLOAD_TEX3_IMAGE 0x4000 /* handled clientside */
+#define I830_UPLOAD_TEX3_CUBE 0x8000 /* handled clientside */
+#define I830_UPLOAD_TEX_N_IMAGE(n) (0x100 << (n * 2))
+#define I830_UPLOAD_TEX_N_CUBE(n) (0x200 << (n * 2))
+#define I830_UPLOAD_TEXIMAGE_MASK 0xff00
+#define I830_UPLOAD_TEX0 0x10000
+#define I830_UPLOAD_TEX1 0x20000
+#define I830_UPLOAD_TEX2 0x40000
+#define I830_UPLOAD_TEX3 0x80000
+#define I830_UPLOAD_TEX_N(n) (0x10000 << (n))
+#define I830_UPLOAD_TEX_MASK 0xf0000
+#define I830_UPLOAD_TEXBLEND0 0x100000
+#define I830_UPLOAD_TEXBLEND1 0x200000
+#define I830_UPLOAD_TEXBLEND2 0x400000
+#define I830_UPLOAD_TEXBLEND3 0x800000
+#define I830_UPLOAD_TEXBLEND_N(n) (0x100000 << (n))
+#define I830_UPLOAD_TEXBLEND_MASK 0xf00000
+#define I830_UPLOAD_TEX_PALETTE_N(n) (0x1000000 << (n))
+#define I830_UPLOAD_TEX_PALETTE_SHARED 0x4000000
+
+/* Indices into buf.Setup where various bits of state are mirrored per
+ * context and per buffer. These can be fired at the card as a unit,
+ * or in a piecewise fashion as required.
+ */
+
+/* Destbuffer state
+ * - backbuffer linear offset and pitch -- invariant in the current dri
+ * - zbuffer linear offset and pitch -- also invariant
+ * - drawing origin in back and depth buffers.
+ *
+ * Keep the depth/back buffer state here to accommodate private buffers
+ * in the future.
+ */
+
+#define I830_DESTREG_CBUFADDR 0
+/* Invariant */
+#define I830_DESTREG_DBUFADDR 1
+#define I830_DESTREG_DV0 2
+#define I830_DESTREG_DV1 3
+#define I830_DESTREG_SENABLE 4
+#define I830_DESTREG_SR0 5
+#define I830_DESTREG_SR1 6
+#define I830_DESTREG_SR2 7
+#define I830_DESTREG_DR0 8
+#define I830_DESTREG_DR1 9
+#define I830_DESTREG_DR2 10
+#define I830_DESTREG_DR3 11
+#define I830_DESTREG_DR4 12
+#define I830_DEST_SETUP_SIZE 13
+
+/* Context state
+ */
+#define I830_CTXREG_STATE1 0
+#define I830_CTXREG_STATE2 1
+#define I830_CTXREG_STATE3 2
+#define I830_CTXREG_STATE4 3
+#define I830_CTXREG_STATE5 4
+#define I830_CTXREG_IALPHAB 5
+#define I830_CTXREG_STENCILTST 6
+#define I830_CTXREG_ENABLES_1 7
+#define I830_CTXREG_ENABLES_2 8
+#define I830_CTXREG_AA 9
+#define I830_CTXREG_FOGCOLOR 10
+#define I830_CTXREG_BLENDCOLR0 11
+#define I830_CTXREG_BLENDCOLR 12 /* Dword 1 of 2 dword command */
+#define I830_CTXREG_VF 13
+#define I830_CTXREG_VF2 14
+#define I830_CTXREG_MCSB0 15
+#define I830_CTXREG_MCSB1 16
+#define I830_CTX_SETUP_SIZE 17
+
+/* Texture state (per tex unit)
+ */
+
+#define I830_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (6 dwords) */
+#define I830_TEXREG_MI1 1
+#define I830_TEXREG_MI2 2
+#define I830_TEXREG_MI3 3
+#define I830_TEXREG_MI4 4
+#define I830_TEXREG_MI5 5
+#define I830_TEXREG_MF 6 /* GFX_OP_MAP_FILTER */
+#define I830_TEXREG_MLC 7 /* GFX_OP_MAP_LOD_CTL */
+#define I830_TEXREG_MLL 8 /* GFX_OP_MAP_LOD_LIMITS */
+#define I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS */
+#define I830_TEX_SETUP_SIZE 10
+
+#define I830_FRONT 0x1
+#define I830_BACK 0x2
+#define I830_DEPTH 0x4
+
+#endif /* _I830_DEFINES_ */
+
+typedef struct _drm_i830_init {
+ enum {
+ I830_INIT_DMA = 0x01,
+ I830_CLEANUP_DMA = 0x02
+ } func;
+ unsigned int mmio_offset;
+ unsigned int buffers_offset;
+ int sarea_priv_offset;
+ unsigned int ring_start;
+ unsigned int ring_end;
+ unsigned int ring_size;
+ unsigned int front_offset;
+ unsigned int back_offset;
+ unsigned int depth_offset;
+ unsigned int w;
+ unsigned int h;
+ unsigned int pitch;
+ unsigned int pitch_bits;
+ unsigned int back_pitch;
+ unsigned int depth_pitch;
+ unsigned int cpp;
+} drm_i830_init_t;
+
+/* Warning: If you change the SAREA structure you must change the Xserver
+ * structure as well */
+
+typedef struct _drm_i830_tex_region {
+ unsigned char next, prev; /* indices to form a circular LRU */
+ unsigned char in_use; /* owned by a client, or free? */
+ int age; /* tracked by clients to update local LRU's */
+} drm_i830_tex_region_t;
+
+typedef struct _drm_i830_sarea {
+ unsigned int ContextState[I830_CTX_SETUP_SIZE];
+ unsigned int BufferState[I830_DEST_SETUP_SIZE];
+ unsigned int TexState[I830_TEXTURE_COUNT][I830_TEX_SETUP_SIZE];
+ unsigned int TexBlendState[I830_TEXBLEND_COUNT][I830_TEXBLEND_SIZE];
+ unsigned int TexBlendStateWordsUsed[I830_TEXBLEND_COUNT];
+ unsigned int Palette[2][256];
+ unsigned int dirty;
+
+ unsigned int nbox;
+ drm_clip_rect_t boxes[I830_NR_SAREA_CLIPRECTS];
+
+ /* Maintain an LRU of contiguous regions of texture space. If
+ * you think you own a region of texture memory, and it has an
+ * age different to the one you set, then you are mistaken and
+ * it has been stolen by another client. If global texAge
+ * hasn't changed, there is no need to walk the list.
+ *
+ * These regions can be used as a proxy for the fine-grained
+ * texture information of other clients - by maintaining them
+ * in the same lru which is used to age their own textures,
+ * clients have an approximate lru for the whole of global
+ * texture space, and can make informed decisions as to which
+ * areas to kick out. There is no need to choose whether to
+ * kick out your own texture or someone else's - simply eject
+ * them all in LRU order.
+ */
+
+ drm_i830_tex_region_t texList[I830_NR_TEX_REGIONS+1];
+	/* Last element is a sentinel */
+ int texAge; /* last time texture was uploaded */
+ int last_enqueue; /* last time a buffer was enqueued */
+ int last_dispatch; /* age of the most recently dispatched buffer */
+ int last_quiescent; /* */
+ int ctxOwner; /* last context to upload state */
+
+ int vertex_prim;
+} drm_i830_sarea_t;
+
+typedef struct _drm_i830_clear {
+ int clear_color;
+ int clear_depth;
+ int flags;
+ unsigned int clear_colormask;
+ unsigned int clear_depthmask;
+} drm_i830_clear_t;
+
+
+
+/* These may be placeholders if we have more cliprects than
+ * I830_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
+ * false, indicating that the buffer will be dispatched again with a
+ * new set of cliprects.
+ */
+typedef struct _drm_i830_vertex {
+ int idx; /* buffer index */
+ int used; /* nr bytes in use */
+ int discard; /* client is finished with the buffer? */
+} drm_i830_vertex_t;
+
+typedef struct _drm_i830_copy_t {
+ int idx; /* buffer index */
+ int used; /* nr bytes in use */
+ void *address; /* Address to copy from */
+} drm_i830_copy_t;
+
+typedef struct drm_i830_dma {
+ void *virtual;
+ int request_idx;
+ int request_size;
+ int granted;
+} drm_i830_dma_t;
+
+#endif /* _I830_DRM_H_ */
diff --git a/linux/i830_drv.c b/linux/i830_drv.c
new file mode 100644
index 00000000..904f3660
--- /dev/null
+++ b/linux/i830_drv.c
@@ -0,0 +1,102 @@
+/* i830_drv.c -- I830 driver -*- linux-c -*-
+ * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Jeff Hartmann <jhartmann@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ * Abraham vd Merwe <abraham@2d3d.co.za>
+ */
+
+#include <linux/config.h>
+#include "i830.h"
+#include "drmP.h"
+#include "i830_drv.h"
+
+#define DRIVER_AUTHOR "VA Linux Systems Inc."
+
+#define DRIVER_NAME "i830"
+#define DRIVER_DESC "Intel 830M"
+#define DRIVER_DATE "20011004"
+
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 2
+#define DRIVER_PATCHLEVEL 0
+
+#define DRIVER_IOCTLS \
+ [DRM_IOCTL_NR(DRM_IOCTL_I830_INIT)] = { i830_dma_init, 1, 1 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I830_VERTEX)] = { i830_dma_vertex, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I830_CLEAR)] = { i830_clear_bufs, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I830_FLUSH)] = { i830_flush_ioctl, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I830_GETAGE)] = { i830_getage, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I830_GETBUF)] = { i830_getbuf, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I830_SWAP)] = { i830_swap_bufs, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I830_COPY)] = { i830_copybuf, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_I830_DOCOPY)] = { i830_docopy, 1, 0 },
+
+#define __HAVE_COUNTERS 4
+#define __HAVE_COUNTER6 _DRM_STAT_IRQ
+#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY
+#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY
+#define __HAVE_COUNTER9 _DRM_STAT_DMA
+
+
+#include "drm_agpsupport.h"
+#include "drm_auth.h"
+#include "drm_bufs.h"
+#include "drm_context.h"
+#include "drm_dma.h"
+#include "drm_drawable.h"
+#include "drm_drv.h"
+
+#ifndef MODULE
+/* DRM(options) is called by the kernel to parse command-line options
+ * passed via the boot-loader (e.g., LILO). It calls the insmod option
+ * routine, drm_parse_drm.
+ */
+
+/* JH- We have to hand expand the string ourselves because of the cpp. If
+ * anyone can think of a way that we can fit into the __setup macro without
+ * changing it, then please send the solution my way.
+ */
+static int __init i830_options( char *str )
+{
+ DRM(parse_options)( str );
+ return 1;
+}
+
+__setup( DRIVER_NAME "=", i830_options );
+#endif
+
+#include "drm_fops.h"
+#include "drm_init.h"
+#include "drm_ioctl.h"
+#include "drm_lock.h"
+#include "drm_lists.h"
+#include "drm_memory.h"
+#include "drm_proc.h"
+#include "drm_vm.h"
+#include "drm_stub.h"
diff --git a/linux/i830_drv.h b/linux/i830_drv.h
new file mode 100644
index 00000000..4e9d6c80
--- /dev/null
+++ b/linux/i830_drv.h
@@ -0,0 +1,213 @@
+/* i830_drv.h -- Private header for the I830 driver -*- linux-c -*-
+ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Jeff Hartmann <jhartmann@valinux.com>
+ *
+ */
+
+#ifndef _I830_DRV_H_
+#define _I830_DRV_H_
+
+typedef struct drm_i830_buf_priv {
+ u32 *in_use;
+ int my_use_idx;
+ int currently_mapped;
+ void *virtual;
+ void *kernel_virtual;
+ int map_count;
+ struct vm_area_struct *vma;
+} drm_i830_buf_priv_t;
+
+typedef struct _drm_i830_ring_buffer{
+ int tail_mask;
+ unsigned long Start;
+ unsigned long End;
+ unsigned long Size;
+ u8 *virtual_start;
+ int head;
+ int tail;
+ int space;
+} drm_i830_ring_buffer_t;
+
+typedef struct drm_i830_private {
+ drm_map_t *sarea_map;
+ drm_map_t *buffer_map;
+ drm_map_t *mmio_map;
+
+ drm_i830_sarea_t *sarea_priv;
+ drm_i830_ring_buffer_t ring;
+
+ unsigned long hw_status_page;
+ unsigned long counter;
+
+ atomic_t flush_done;
+ wait_queue_head_t flush_queue; /* Processes waiting until flush */
+ drm_buf_t *mmap_buffer;
+
+ u32 front_di1, back_di1, zi1;
+
+ int back_offset;
+ int depth_offset;
+ int w, h;
+ int pitch;
+ int back_pitch;
+ int depth_pitch;
+ unsigned int cpp;
+} drm_i830_private_t;
+
+ /* i830_dma.c */
+extern int i830_dma_schedule(drm_device_t *dev, int locked);
+extern int i830_getbuf(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int i830_dma_init(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int i830_flush_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern void i830_reclaim_buffers(drm_device_t *dev, pid_t pid);
+extern int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg);
+extern int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma);
+extern int i830_copybuf(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int i830_docopy(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+extern void i830_dma_quiescent(drm_device_t *dev);
+
+extern int i830_dma_vertex(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+extern int i830_swap_bufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+extern int i830_clear_bufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+#define I830_VERBOSE 0
+
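+/* Register access goes through the MMIO aperture mapped at mmio_map. */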
+#define I830_BASE(reg) ((unsigned long) \
+ dev_priv->mmio_map->handle)
+#define I830_ADDR(reg) (I830_BASE(reg) + reg)
+#define I830_DEREF(reg) *(__volatile__ int *)I830_ADDR(reg)
+#define I830_READ(reg) I830_DEREF(reg)
+#define I830_WRITE(reg,val) do { I830_DEREF(reg) = val; } while (0)
+#define I830_DEREF16(reg) *(__volatile__ u16 *)I830_ADDR(reg)
+#define I830_READ16(reg) I830_DEREF16(reg)
+#define I830_WRITE16(reg,val) do { I830_DEREF16(reg) = val; } while (0)
+
+#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
+#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
+#define CMD_REPORT_HEAD (7<<23)
+#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
+#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
+
+#define INST_PARSER_CLIENT 0x00000000
+#define INST_OP_FLUSH 0x02000000
+#define INST_FLUSH_MAP_CACHE 0x00000001
+
+
+#define BB1_START_ADDR_MASK (~0x7)
+#define BB1_PROTECTED (1<<0)
+#define BB1_UNPROTECTED (0<<0)
+#define BB2_END_ADDR_MASK (~0x7)
+
+#define I830REG_HWSTAM 0x02098
+#define I830REG_INT_IDENTITY_R 0x020a4
+#define I830REG_INT_MASK_R 0x020a8
+#define I830REG_INT_ENABLE_R 0x020a0
+
+#define LP_RING 0x2030
+#define HP_RING 0x2040
+#define RING_TAIL 0x00
+#define TAIL_ADDR 0x000FFFF8
+#define RING_HEAD 0x04
+#define HEAD_WRAP_COUNT 0xFFE00000
+#define HEAD_WRAP_ONE 0x00200000
+#define HEAD_ADDR 0x001FFFFC
+#define RING_START 0x08
+#define START_ADDR 0x00FFFFF8
+#define RING_LEN 0x0C
+#define RING_NR_PAGES 0x000FF000
+#define RING_REPORT_MASK 0x00000006
+#define RING_REPORT_64K 0x00000002
+#define RING_REPORT_128K 0x00000004
+#define RING_NO_REPORT 0x00000000
+#define RING_VALID_MASK 0x00000001
+#define RING_VALID 0x00000001
+#define RING_INVALID 0x00000000
+
+#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define SC_UPDATE_SCISSOR (0x1<<1)
+#define SC_ENABLE_MASK (0x1<<0)
+#define SC_ENABLE (0x1<<0)
+
+#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
+#define SCI_YMIN_MASK (0xffff<<16)
+#define SCI_XMIN_MASK (0xffff<<0)
+#define SCI_YMAX_MASK (0xffff<<16)
+#define SCI_XMAX_MASK (0xffff<<0)
+
+#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
+#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
+#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
+#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+#define GFX_OP_PRIMITIVE ((0x3<<29)|(0x1f<<24))
+
+#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
+
+
+#define BR00_BITBLT_CLIENT 0x40000000
+#define BR00_OP_COLOR_BLT 0x10000000
+#define BR00_OP_SRC_COPY_BLT 0x10C00000
+#define BR13_SOLID_PATTERN 0x80000000
+
+#define BUF_3D_ID_COLOR_BACK (0x3<<24)
+#define BUF_3D_ID_DEPTH (0x7<<24)
+#define BUF_3D_USE_FENCE (1<<23)
+#define BUF_3D_PITCH(x) (((x)/4)<<2)
+
+#define CMD_OP_MAP_PALETTE_LOAD ((3<<29)|(0x1d<<24)|(0x82<<16)|255)
+#define MAP_PALETTE_NUM(x) ((x<<8) & (1<<8))
+#define MAP_PALETTE_BOTH (1<<11)
+
+#define XY_COLOR_BLT_CMD ((2<<29)|(0x50<<22)|0x4)
+#define XY_COLOR_BLT_WRITE_ALPHA (1<<21)
+#define XY_COLOR_BLT_WRITE_RGB (1<<20)
+
+#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
+#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
+#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
+
+#define MI_BATCH_BUFFER ((0x30<<23)|1)
+#define MI_BATCH_NON_SECURE (1)
+
+
+#endif
+
diff --git a/linux/picker.c b/linux/picker.c
index 4b4fbe90..6c228dfc 100644
--- a/linux/picker.c
+++ b/linux/picker.c
@@ -22,14 +22,9 @@
#define CONFIG_AGP 0
#endif
-#ifndef CONFIG_FB_SIS
-#define CONFIG_FB_SIS 0
-#endif
-
SMP = CONFIG_SMP
MODULES = CONFIG_MODULES
MODVERSIONS = CONFIG_MODVERSIONS
AGP = CONFIG_AGP
AGP_MODULE = CONFIG_AGP_MODULE
RELEASE = UTS_RELEASE
-SIS = CONFIG_FB_SIS
diff --git a/linux/r128_cce.c b/linux/r128_cce.c
index 2c03e6ba..ef11a497 100644
--- a/linux/r128_cce.c
+++ b/linux/r128_cce.c
@@ -350,20 +350,11 @@ static void r128_cce_init_ring_buffer( drm_device_t *dev,
tmp_ofs = dev_priv->ring_rptr->offset - dev->sg->handle;
page_ofs = tmp_ofs >> PAGE_SHIFT;
-#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400)
R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR,
entry->busaddr[page_ofs]);
DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n",
entry->busaddr[page_ofs],
entry->handle + tmp_ofs );
-#else
- R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR,
- page_to_bus(entry->pagelist[page_ofs]));
-
- DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n",
- page_to_bus(entry->pagelist[page_ofs]),
- entry->handle + tmp_ofs );
-#endif
}
/* Set watermark control */
diff --git a/linux/r128_drv.c b/linux/r128_drv.c
index d4eda6c8..d8d7be4f 100644
--- a/linux/r128_drv.c
+++ b/linux/r128_drv.c
@@ -39,11 +39,11 @@
#define DRIVER_NAME "r128"
#define DRIVER_DESC "ATI Rage 128"
-#define DRIVER_DATE "20010405"
+#define DRIVER_DATE "20010917"
#define DRIVER_MAJOR 2
-#define DRIVER_MINOR 1
-#define DRIVER_PATCHLEVEL 6
+#define DRIVER_MINOR 2
+#define DRIVER_PATCHLEVEL 0
#define DRIVER_IOCTLS \
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { r128_cce_buffers, 1, 0 }, \
diff --git a/linux/r128_state.c b/linux/r128_state.c
index 66af5c45..9de1b6b9 100644
--- a/linux/r128_state.c
+++ b/linux/r128_state.c
@@ -1519,10 +1519,75 @@ int r128_cce_indirect( struct inode *inode, struct file *filp,
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
+ drm_r128_private_t *dev_priv = dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_t *buf;
+ drm_r128_buf_priv_t *buf_priv;
+ drm_r128_indirect_t indirect;
+#if 0
+ RING_LOCALS;
+#endif
LOCK_TEST_WITH_RETURN( dev );
- /* Indirect buffer firing is not supported at this time.
+ if ( !dev_priv ) {
+ DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
+ return -EINVAL;
+ }
+
+ if ( copy_from_user( &indirect, (drm_r128_indirect_t *)arg,
+ sizeof(indirect) ) )
+ return -EFAULT;
+
+ DRM_DEBUG( "indirect: idx=%d s=%d e=%d d=%d\n",
+ indirect.idx, indirect.start,
+ indirect.end, indirect.discard );
+
+ if ( indirect.idx < 0 || indirect.idx >= dma->buf_count ) {
+ DRM_ERROR( "buffer index %d (of %d max)\n",
+ indirect.idx, dma->buf_count - 1 );
+ return -EINVAL;
+ }
+
+ buf = dma->buflist[indirect.idx];
+ buf_priv = buf->dev_private;
+
+ if ( buf->pid != current->pid ) {
+ DRM_ERROR( "process %d using buffer owned by %d\n",
+ current->pid, buf->pid );
+ return -EINVAL;
+ }
+ if ( buf->pending ) {
+ DRM_ERROR( "sending pending buffer %d\n", indirect.idx );
+ return -EINVAL;
+ }
+
+ if ( indirect.start < buf->used ) {
+ DRM_ERROR( "reusing indirect: start=0x%x actual=0x%x\n",
+ indirect.start, buf->used );
+ return -EINVAL;
+ }
+
+ RING_SPACE_TEST_WITH_RETURN( dev_priv );
+ VB_AGE_TEST_WITH_RETURN( dev_priv );
+
+ buf->used = indirect.end;
+ buf_priv->discard = indirect.discard;
+
+#if 0
+ /* Wait for the 3D stream to idle before the indirect buffer
+ * containing 2D acceleration commands is processed.
*/
- return -EINVAL;
+ BEGIN_RING( 2 );
+ RADEON_WAIT_UNTIL_3D_IDLE();
+ ADVANCE_RING();
+#endif
+
+ /* Dispatch the indirect buffer full of commands from the
+ * X server. This is insecure and is thus only available to
+ * privileged clients.
+ */
+ r128_cce_dispatch_indirect( dev, buf, indirect.start, indirect.end );
+
+ return 0;
}
diff --git a/linux/radeon_cp.c b/linux/radeon_cp.c
index 6d508a5c..904c8b77 100644
--- a/linux/radeon_cp.c
+++ b/linux/radeon_cp.c
@@ -543,8 +543,7 @@ static int radeon_do_engine_reset( drm_device_t *dev )
RADEON_SOFT_RESET_RE |
RADEON_SOFT_RESET_PP |
RADEON_SOFT_RESET_E2 |
- RADEON_SOFT_RESET_RB |
- RADEON_SOFT_RESET_HDP ) );
+ RADEON_SOFT_RESET_RB ) );
RADEON_READ( RADEON_RBBM_SOFT_RESET );
RADEON_WRITE( RADEON_RBBM_SOFT_RESET, ( rbbm_soft_reset &
~( RADEON_SOFT_RESET_CP |
@@ -553,8 +552,7 @@ static int radeon_do_engine_reset( drm_device_t *dev )
RADEON_SOFT_RESET_RE |
RADEON_SOFT_RESET_PP |
RADEON_SOFT_RESET_E2 |
- RADEON_SOFT_RESET_RB |
- RADEON_SOFT_RESET_HDP ) ) );
+ RADEON_SOFT_RESET_RB ) ) );
RADEON_READ( RADEON_RBBM_SOFT_RESET );
@@ -622,20 +620,12 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev,
tmp_ofs = dev_priv->ring_rptr->offset - dev->sg->handle;
page_ofs = tmp_ofs >> PAGE_SHIFT;
-#if defined(__alpha__) && (LINUX_VERSION_CODE >= 0x020400)
+
RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR,
entry->busaddr[page_ofs]);
DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n",
entry->busaddr[page_ofs],
entry->handle + tmp_ofs );
-#else
- RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR,
- page_to_bus(entry->pagelist[page_ofs]));
-
- DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n",
- page_to_bus(entry->pagelist[page_ofs]),
- entry->handle + tmp_ofs );
-#endif
}
/* Set ring buffer size */
diff --git a/linux/radeon_state.c b/linux/radeon_state.c
index 0a209245..9322292a 100644
--- a/linux/radeon_state.c
+++ b/linux/radeon_state.c
@@ -1058,7 +1058,7 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
DRM_ERROR( "EFAULT on tex->image\n" );
return -EFAULT;
}
- } else if ( size < 4 ) {
+ } else if ( size < 4 && size > 0 ) {
size = 4;
}
diff --git a/linux/sis.h b/linux/sis.h
index 2e5f7bb3..02f03086 100644
--- a/linux/sis.h
+++ b/linux/sis.h
@@ -24,7 +24,7 @@
* DEALINGS IN THE SOFTWARE.
*
*/
-/* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/sis.h,v 1.1 2001/05/19 18:29:22 dawes Exp $ */
+/* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/sis.h,v 1.2 2001/12/19 21:25:59 dawes Exp $ */
#ifndef __SIS_H__
#define __SIS_H__
@@ -47,4 +47,10 @@
#define DRIVER_AGP_BUFFERS_MAP( dev ) \
((drm_sis_private_t *)((dev)->dev_private))->buffers
+extern int sis_init_context(int context);
+extern int sis_final_context(int context);
+
+#define DRIVER_CTX_CTOR sis_init_context
+#define DRIVER_CTX_DTOR sis_final_context
+
#endif
diff --git a/linux/sis_drv.c b/linux/sis_drv.c
index 3dd83fd7..2d9612f5 100644
--- a/linux/sis_drv.c
+++ b/linux/sis_drv.c
@@ -40,12 +40,12 @@
#define DRIVER_PATCHLEVEL 0
#define DRIVER_IOCTLS \
- [DRM_IOCTL_NR(SIS_IOCTL_FB_ALLOC)] = { sis_fb_alloc, 1, 1 }, \
- [DRM_IOCTL_NR(SIS_IOCTL_FB_FREE)] = { sis_fb_free, 1, 1 }, \
+ [DRM_IOCTL_NR(SIS_IOCTL_FB_ALLOC)] = { sis_fb_alloc, 1, 0 }, \
+ [DRM_IOCTL_NR(SIS_IOCTL_FB_FREE)] = { sis_fb_free, 1, 0 }, \
/* AGP Memory Management */ \
- [DRM_IOCTL_NR(SIS_IOCTL_AGP_INIT)] = { sisp_agp_init, 1, 1 }, \
- [DRM_IOCTL_NR(SIS_IOCTL_AGP_ALLOC)] = { sisp_agp_alloc, 1, 1 }, \
- [DRM_IOCTL_NR(SIS_IOCTL_AGP_FREE)] = { sisp_agp_free, 1, 1 }
+ [DRM_IOCTL_NR(SIS_IOCTL_AGP_INIT)] = { sisp_agp_init, 1, 0 }, \
+ [DRM_IOCTL_NR(SIS_IOCTL_AGP_ALLOC)] = { sisp_agp_alloc, 1, 0 }, \
+ [DRM_IOCTL_NR(SIS_IOCTL_AGP_FREE)] = { sisp_agp_free, 1, 0 }
#if 0 /* these don't appear to be defined */
/* SIS Stereo */
[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { sis_control, 1, 1 },
diff --git a/linux/sis_ds.c b/linux/sis_ds.c
index 6143ad83..95880a48 100644
--- a/linux/sis_ds.c
+++ b/linux/sis_ds.c
@@ -33,7 +33,7 @@
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
-#include <linux/malloc.h>
+#include <linux/slab.h>
#include <linux/poll.h>
#include <asm/io.h>
#include <linux/pci.h>
diff --git a/linux/sis_mm.c b/linux/sis_mm.c
index 2c2e0d95..81832769 100644
--- a/linux/sis_mm.c
+++ b/linux/sis_mm.c
@@ -72,7 +72,7 @@ static int del_alloc_set(int context, int type, unsigned int val)
}
/* fb management via fb device */
-#if 0
+#if 1
int sis_fb_alloc(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
@@ -90,7 +90,7 @@ int sis_fb_alloc(struct inode *inode, struct file *filp, unsigned int cmd,
fb.offset = req.offset;
fb.free = req.offset;
if(!add_alloc_set(fb.context, VIDEO_TYPE, fb.free)){
- DRM_DEBUG("adding to allocation set fails");
+ DRM_DEBUG("adding to allocation set fails\n");
sis_free(req.offset);
retval = -1;
}
@@ -185,7 +185,7 @@ int sisp_agp_alloc(struct inode *inode, struct file *filp, unsigned int cmd,
agp.offset = block->ofs;
agp.free = (unsigned int)block;
if(!add_alloc_set(agp.context, AGP_TYPE, agp.free)){
- DRM_DEBUG("adding to allocation set fails");
+ DRM_DEBUG("adding to allocation set fails\n");
mmFreeMem((PMemBlock)agp.free);
retval = -1;
}
@@ -279,9 +279,7 @@ int sis_final_context(int context)
retval = setFirst(set, &item);
while(retval){
DRM_DEBUG("free video memory 0x%x\n", item);
-#if 0
sis_free(item);
-#endif
retval = setNext(set, &item);
}
setDestroy(set);
diff --git a/shared-core/drm.h b/shared-core/drm.h
index ac9f407a..0db586fc 100644
--- a/shared-core/drm.h
+++ b/shared-core/drm.h
@@ -104,9 +104,8 @@ typedef struct drm_tex_region {
#include "i810_drm.h"
#include "r128_drm.h"
#include "radeon_drm.h"
-#ifdef CONFIG_DRM_SIS
#include "sis_drm.h"
-#endif
+#include "i830_drm.h"
typedef struct drm_version {
int version_major; /* Major version */
@@ -449,6 +448,12 @@ typedef struct drm_scatter_gather {
#define DRM_IOCTL_I810_SWAP DRM_IO( 0x46)
#define DRM_IOCTL_I810_COPY DRM_IOW( 0x47, drm_i810_copy_t)
#define DRM_IOCTL_I810_DOCOPY DRM_IO( 0x48)
+#define DRM_IOCTL_I810_OV0INFO DRM_IOR( 0x49, drm_i810_overlay_t)
+#define DRM_IOCTL_I810_FSTATUS DRM_IO ( 0x4a)
+#define DRM_IOCTL_I810_OV0FLIP DRM_IO ( 0x4b)
+#define DRM_IOCTL_I810_MC DRM_IOW( 0x4c, drm_i810_mc_t)
+#define DRM_IOCTL_I810_RSTATUS DRM_IO ( 0x4d )
+
/* Rage 128 specific ioctls */
#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
@@ -483,7 +488,6 @@ typedef struct drm_scatter_gather {
#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t)
#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t)
-#ifdef CONFIG_DRM_SIS
/* SiS specific ioctls */
#define SIS_IOCTL_FB_ALLOC DRM_IOWR(0x44, drm_sis_mem_t)
#define SIS_IOCTL_FB_FREE DRM_IOW( 0x45, drm_sis_mem_t)
@@ -493,6 +497,16 @@ typedef struct drm_scatter_gather {
#define SIS_IOCTL_FLIP DRM_IOW( 0x48, drm_sis_flip_t)
#define SIS_IOCTL_FLIP_INIT DRM_IO( 0x49)
#define SIS_IOCTL_FLIP_FINAL DRM_IO( 0x50)
-#endif
+
+/* I830 specific ioctls */
+#define DRM_IOCTL_I830_INIT DRM_IOW( 0x40, drm_i830_init_t)
+#define DRM_IOCTL_I830_VERTEX DRM_IOW( 0x41, drm_i830_vertex_t)
+#define DRM_IOCTL_I830_CLEAR DRM_IOW( 0x42, drm_i830_clear_t)
+#define DRM_IOCTL_I830_FLUSH DRM_IO ( 0x43)
+#define DRM_IOCTL_I830_GETAGE DRM_IO ( 0x44)
+#define DRM_IOCTL_I830_GETBUF DRM_IOWR(0x45, drm_i830_dma_t)
+#define DRM_IOCTL_I830_SWAP DRM_IO ( 0x46)
+#define DRM_IOCTL_I830_COPY DRM_IOW( 0x47, drm_i830_copy_t)
+#define DRM_IOCTL_I830_DOCOPY DRM_IO ( 0x48)
#endif
diff --git a/shared/drm.h b/shared/drm.h
index ac9f407a..0db586fc 100644
--- a/shared/drm.h
+++ b/shared/drm.h
@@ -104,9 +104,8 @@ typedef struct drm_tex_region {
#include "i810_drm.h"
#include "r128_drm.h"
#include "radeon_drm.h"
-#ifdef CONFIG_DRM_SIS
#include "sis_drm.h"
-#endif
+#include "i830_drm.h"
typedef struct drm_version {
int version_major; /* Major version */
@@ -449,6 +448,12 @@ typedef struct drm_scatter_gather {
#define DRM_IOCTL_I810_SWAP DRM_IO( 0x46)
#define DRM_IOCTL_I810_COPY DRM_IOW( 0x47, drm_i810_copy_t)
#define DRM_IOCTL_I810_DOCOPY DRM_IO( 0x48)
+#define DRM_IOCTL_I810_OV0INFO DRM_IOR( 0x49, drm_i810_overlay_t)
+#define DRM_IOCTL_I810_FSTATUS DRM_IO ( 0x4a)
+#define DRM_IOCTL_I810_OV0FLIP DRM_IO ( 0x4b)
+#define DRM_IOCTL_I810_MC DRM_IOW( 0x4c, drm_i810_mc_t)
+#define DRM_IOCTL_I810_RSTATUS DRM_IO ( 0x4d )
+
/* Rage 128 specific ioctls */
#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
@@ -483,7 +488,6 @@ typedef struct drm_scatter_gather {
#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t)
#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t)
-#ifdef CONFIG_DRM_SIS
/* SiS specific ioctls */
#define SIS_IOCTL_FB_ALLOC DRM_IOWR(0x44, drm_sis_mem_t)
#define SIS_IOCTL_FB_FREE DRM_IOW( 0x45, drm_sis_mem_t)
@@ -493,6 +497,16 @@ typedef struct drm_scatter_gather {
#define SIS_IOCTL_FLIP DRM_IOW( 0x48, drm_sis_flip_t)
#define SIS_IOCTL_FLIP_INIT DRM_IO( 0x49)
#define SIS_IOCTL_FLIP_FINAL DRM_IO( 0x50)
-#endif
+
+/* I830 specific ioctls */
+#define DRM_IOCTL_I830_INIT DRM_IOW( 0x40, drm_i830_init_t)
+#define DRM_IOCTL_I830_VERTEX DRM_IOW( 0x41, drm_i830_vertex_t)
+#define DRM_IOCTL_I830_CLEAR DRM_IOW( 0x42, drm_i830_clear_t)
+#define DRM_IOCTL_I830_FLUSH DRM_IO ( 0x43)
+#define DRM_IOCTL_I830_GETAGE DRM_IO ( 0x44)
+#define DRM_IOCTL_I830_GETBUF DRM_IOWR(0x45, drm_i830_dma_t)
+#define DRM_IOCTL_I830_SWAP DRM_IO ( 0x46)
+#define DRM_IOCTL_I830_COPY DRM_IOW( 0x47, drm_i830_copy_t)
+#define DRM_IOCTL_I830_DOCOPY DRM_IO ( 0x48)
#endif
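
For orientation, the new DRM_IOCTL_I830_* numbers above are ordinary ioctl request codes, so a client drives them through ioctl(2) on an open DRM file descriptor. A minimal userspace sketch; the /dev/dri/card0 node and the assumption that the caller has already been authenticated by the X server are illustrative, not part of this patch:

	/* Hypothetical client-side use of one of the argument-less i830
	 * ioctls declared above; DRM_IOCTL_I830_FLUSH is DRM_IO(0x43), so
	 * no argument struct is passed.
	 */
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include "drm.h"	/* now pulls in i830_drm.h as well */

	int flush_i830( void )
	{
		int fd = open( "/dev/dri/card0", O_RDWR );	/* device node assumed */
		int ret;

		if ( fd < 0 )
			return -1;
		ret = ioctl( fd, DRM_IOCTL_I830_FLUSH );	/* no argument needed */
		close( fd );
		return ret;
	}
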