author     David Dawes <dawes@xfree86.org>    2001-03-19 17:45:52 +0000
committer  David Dawes <dawes@xfree86.org>    2001-03-19 17:45:52 +0000
commit     0e5b8d77cfe0f86698041aebe31c54f59c877825 (patch)
tree       d0a7fe68c1c89b352fd254b1635d10ade3ad81fa
parent     b42ff4f6600b97e9b7482152d5ea8713e800dc00 (diff)
Import of XFree86 4.0.99.1 (tag: X_4_0_99_1)
-rw-r--r--  linux-core/radeon_drv.c   731
-rw-r--r--  linux/radeon_bufs.c       298
-rw-r--r--  linux/radeon_context.c    215
-rw-r--r--  linux/radeon_cp.c         181
-rw-r--r--  linux/radeon_drm.h         34
-rw-r--r--  linux/radeon_drv.c        731
-rw-r--r--  linux/radeon_drv.h        170
-rw-r--r--  linux/radeon_state.c      312
8 files changed, 2228 insertions, 444 deletions
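
The bulk of this import replaces the shared DRIVER_IOCTLS template macro with a per-driver drm_ioctl_desc_t table: each entry, indexed by DRM_IOCTL_NR() via a designated initializer, pairs a handler with the auth_needed/root_only flags that radeon_ioctl() checks before dispatching. The standalone sketch below shows that dispatch pattern in isolation; the struct, enum, and handler names are illustrative stand-ins, not the DRM definitions.

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for the dispatch pattern used by radeon_ioctls[]:
 * a table indexed by ioctl number, each entry holding a handler plus the
 * auth_needed/root_only flags checked before the call. */
typedef int (*ioctl_fn)(unsigned long arg);

typedef struct {
	ioctl_fn fn;
	int auth_needed;
	int root_only;
} ioctl_desc;

static int do_version(unsigned long arg) { (void)arg; return 0; }
static int do_reset(unsigned long arg)   { (void)arg; return 0; }

enum { NR_VERSION = 0, NR_RESET = 1, NR_COUNT };

/* Designated initializers keep the table sparse and order-independent,
 * the same style the diff uses with DRM_IOCTL_NR(). */
static const ioctl_desc ioctls[NR_COUNT] = {
	[NR_VERSION] = { do_version, 0, 0 },
	[NR_RESET]   = { do_reset,   1, 1 },
};

static int dispatch(int nr, int authenticated, int is_root, unsigned long arg)
{
	if (nr < 0 || nr >= NR_COUNT || !ioctls[nr].fn)
		return -EINVAL;
	if ((ioctls[nr].root_only && !is_root) ||
	    (ioctls[nr].auth_needed && !authenticated))
		return -EACCES;
	return ioctls[nr].fn(arg);
}

int main(void)
{
	printf("%d\n", dispatch(NR_RESET, 1, 0, 0));   /* -EACCES: not root */
	printf("%d\n", dispatch(NR_VERSION, 0, 0, 0)); /* 0 */
	return 0;
}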
diff --git a/linux-core/radeon_drv.c b/linux-core/radeon_drv.c
index dba2037f9..0113ed97c 100644
--- a/linux-core/radeon_drv.c
+++ b/linux-core/radeon_drv.c
@@ -1,7 +1,7 @@
/* radeon_drv.c -- ATI Radeon driver -*- linux-c -*-
- * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
*
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -18,70 +18,685 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Kevin E. Martin <martin@valinux.com>
+ * Rickard E. (Rik) Faith <faith@valinux.com>
*
- * Authors:
- * Gareth Hughes <gareth@valinux.com>
*/
#include <linux/config.h>
-#include "radeon.h"
#include "drmP.h"
#include "radeon_drv.h"
-#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc."
-
-#define DRIVER_NAME "radeon"
-#define DRIVER_DESC "ATI Radeon"
-#define DRIVER_DATE "20010308"
-
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 1
-
-#define DRIVER_IOCTLS \
- [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_INIT)] = { radeon_cp_init, 1, 1 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_START)] = { radeon_cp_start, 1, 1 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_STOP)] = { radeon_cp_stop, 1, 1 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_RESET)] = { radeon_cp_reset, 1, 1 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_IDLE)] = { radeon_cp_idle, 1, 0 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_RESET)] = { radeon_engine_reset, 1, 0 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_FULLSCREEN)] = { radeon_fullscreen, 1, 0 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_SWAP)] = { radeon_cp_swap, 1, 0 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CLEAR)] = { radeon_cp_clear, 1, 0 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_VERTEX)] = { radeon_cp_vertex, 1, 0 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDICES)] = { radeon_cp_indices, 1, 0 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_TEXTURE)] = { radeon_cp_texture, 1, 0 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_STIPPLE)] = { radeon_cp_stipple, 1, 0 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDIRECT)] = { radeon_cp_indirect, 1, 1 },
+#define RADEON_NAME "radeon"
+#define RADEON_DESC "ATI Radeon"
+#define RADEON_DATE "20010105"
+#define RADEON_MAJOR 1
+#define RADEON_MINOR 0
+#define RADEON_PATCHLEVEL 0
+static drm_device_t radeon_device;
+drm_ctx_t radeon_res_ctx;
-#if 0
-/* GH: Count data sent to card via ring or vertex/indirect buffers.
+static struct file_operations radeon_fops = {
+#if LINUX_VERSION_CODE >= 0x020400
+ /* This started being used during 2.4.0-test */
+ owner: THIS_MODULE,
+#endif
+ open: radeon_open,
+ flush: drm_flush,
+ release: radeon_release,
+ ioctl: radeon_ioctl,
+ mmap: drm_mmap,
+ read: drm_read,
+ fasync: drm_fasync,
+ poll: drm_poll,
+};
+
+static struct miscdevice radeon_misc = {
+ minor: MISC_DYNAMIC_MINOR,
+ name: RADEON_NAME,
+ fops: &radeon_fops,
+};
+
+static drm_ioctl_desc_t radeon_ioctls[] = {
+ [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { radeon_version, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { radeon_addbufs, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { radeon_mapbufs, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { radeon_addctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { radeon_rmctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { radeon_modctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { radeon_getctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { radeon_switchctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { radeon_newctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { radeon_resctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { radeon_lock, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { radeon_unlock, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
+
+#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
+#endif
+
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_INIT)] = { radeon_cp_init, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_START)] = { radeon_cp_start, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_STOP)] = { radeon_cp_stop, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_RESET)] = { radeon_cp_reset, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_IDLE)] = { radeon_cp_idle, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_RESET)] = { radeon_engine_reset, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_FULLSCREEN)] = { radeon_fullscreen, 1, 0 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_SWAP)] = { radeon_cp_swap, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CLEAR)] = { radeon_cp_clear, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_VERTEX)] = { radeon_cp_vertex, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDICES)] = { radeon_cp_indices, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_BLIT)] = { radeon_cp_blit, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_STIPPLE)] = { radeon_cp_stipple, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDIRECT)]= { radeon_cp_indirect,1, 1 },
+};
+#define RADEON_IOCTL_COUNT DRM_ARRAY_SIZE(radeon_ioctls)
+
+#ifdef MODULE
+static char *radeon = NULL;
+#endif
+
+MODULE_AUTHOR("VA Linux Systems, Inc.");
+MODULE_DESCRIPTION("radeon");
+MODULE_PARM(radeon, "s");
+
+#ifndef MODULE
+/* radeon_options is called by the kernel to parse command-line options
+ * passed via the boot-loader (e.g., LILO). It calls the insmod option
+ * routine, drm_parse_options.
*/
-#define __HAVE_COUNTERS 3
-#define __HAVE_COUNTER6 _DRM_STAT_IRQ
-#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY
-#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY
-#endif
-
-
-#include "drm_agpsupport.h"
-#include "drm_auth.h"
-#include "drm_bufs.h"
-#include "drm_context.h"
-#include "drm_dma.h"
-#include "drm_drawable.h"
-#include "drm_drv.h"
-#include "drm_fops.h"
-#include "drm_init.h"
-#include "drm_ioctl.h"
-#include "drm_lock.h"
-#include "drm_memory.h"
-#include "drm_proc.h"
-#include "drm_vm.h"
-#include "drm_stub.h"
+
+static int __init radeon_options(char *str)
+{
+ drm_parse_options(str);
+ return 1;
+}
+
+__setup("radeon=", radeon_options);
+#endif
+
+static int radeon_setup(drm_device_t *dev)
+{
+ int i;
+
+ atomic_set(&dev->ioctl_count, 0);
+ atomic_set(&dev->vma_count, 0);
+ dev->buf_use = 0;
+ atomic_set(&dev->buf_alloc, 0);
+
+ drm_dma_setup(dev);
+
+ atomic_set(&dev->total_open, 0);
+ atomic_set(&dev->total_close, 0);
+ atomic_set(&dev->total_ioctl, 0);
+ atomic_set(&dev->total_irq, 0);
+ atomic_set(&dev->total_ctx, 0);
+ atomic_set(&dev->total_locks, 0);
+ atomic_set(&dev->total_unlocks, 0);
+ atomic_set(&dev->total_contends, 0);
+ atomic_set(&dev->total_sleeps, 0);
+
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ dev->magiclist[i].head = NULL;
+ dev->magiclist[i].tail = NULL;
+ }
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ dev->vmalist = NULL;
+ dev->lock.hw_lock = NULL;
+ init_waitqueue_head(&dev->lock.lock_queue);
+ dev->queue_count = 0;
+ dev->queue_reserved = 0;
+ dev->queue_slots = 0;
+ dev->queuelist = NULL;
+ dev->irq = 0;
+ dev->context_flag = 0;
+ dev->interrupt_flag = 0;
+ dev->dma_flag = 0;
+ dev->last_context = 0;
+ dev->last_switch = 0;
+ dev->last_checked = 0;
+ init_timer(&dev->timer);
+ init_waitqueue_head(&dev->context_wait);
+
+ dev->ctx_start = 0;
+ dev->lck_start = 0;
+
+ dev->buf_rp = dev->buf;
+ dev->buf_wp = dev->buf;
+ dev->buf_end = dev->buf + DRM_BSZ;
+ dev->buf_async = NULL;
+ init_waitqueue_head(&dev->buf_readers);
+ init_waitqueue_head(&dev->buf_writers);
+
+ radeon_res_ctx.handle = -1;
+
+ DRM_DEBUG("\n");
+
+ /* The kernel's context could be created here, but is now created
+ in drm_dma_enqueue. This is more resource-efficient for
+ hardware that does not do DMA, but may mean that
+ drm_select_queue fails between the time the interrupt is
+ initialized and the time the queues are initialized. */
+
+ return 0;
+}
+
+
+static int radeon_takedown(drm_device_t *dev)
+{
+ int i;
+ drm_magic_entry_t *pt, *next;
+ drm_map_t *map;
+ drm_vma_entry_t *vma, *vma_next;
+
+ DRM_DEBUG("\n");
+
+ down(&dev->struct_sem);
+ del_timer(&dev->timer);
+
+ if (dev->devname) {
+ drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
+ dev->devname = NULL;
+ }
+
+ if (dev->unique) {
+ drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
+ dev->unique = NULL;
+ dev->unique_len = 0;
+ }
+ /* Clear pid list */
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ for (pt = dev->magiclist[i].head; pt; pt = next) {
+ next = pt->next;
+ drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
+ }
+ dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
+ }
+
+#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
+ /* Clear AGP information */
+ if (dev->agp) {
+ drm_agp_mem_t *entry;
+ drm_agp_mem_t *nexte;
+
+ /* Remove AGP resources, but leave dev->agp
+ intact until radeon_cleanup is called. */
+ for (entry = dev->agp->memory; entry; entry = nexte) {
+ nexte = entry->next;
+ if (entry->bound) drm_unbind_agp(entry->memory);
+ drm_free_agp(entry->memory, entry->pages);
+ drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+ }
+ dev->agp->memory = NULL;
+
+ if (dev->agp->acquired) _drm_agp_release();
+
+ dev->agp->acquired = 0;
+ dev->agp->enabled = 0;
+ }
+#endif
+
+ /* Clear vma list (only built for debugging) */
+ if (dev->vmalist) {
+ for (vma = dev->vmalist; vma; vma = vma_next) {
+ vma_next = vma->next;
+ drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
+ }
+ dev->vmalist = NULL;
+ }
+
+ /* Clear map area and mtrr information */
+ if (dev->maplist) {
+ for (i = 0; i < dev->map_count; i++) {
+ map = dev->maplist[i];
+ switch (map->type) {
+ case _DRM_REGISTERS:
+ case _DRM_FRAME_BUFFER:
+#ifdef CONFIG_MTRR
+ if (map->mtrr >= 0) {
+ int retcode;
+ retcode = mtrr_del(map->mtrr,
+ map->offset,
+ map->size);
+ DRM_DEBUG("mtrr_del = %d\n", retcode);
+ }
+#endif
+ drm_ioremapfree(map->handle, map->size);
+ break;
+ case _DRM_SHM:
+ drm_free_pages((unsigned long)map->handle,
+ drm_order(map->size)
+ - PAGE_SHIFT,
+ DRM_MEM_SAREA);
+ break;
+ case _DRM_AGP:
+ /* Do nothing here, because this is all
+ handled in the AGP/GART driver. */
+ break;
+ }
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ }
+ drm_free(dev->maplist,
+ dev->map_count * sizeof(*dev->maplist),
+ DRM_MEM_MAPS);
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ }
+
+ drm_dma_takedown(dev);
+
+ dev->queue_count = 0;
+ if (dev->lock.hw_lock) {
+ dev->lock.hw_lock = NULL; /* SHM removed */
+ dev->lock.pid = 0;
+ wake_up_interruptible(&dev->lock.lock_queue);
+ }
+ up(&dev->struct_sem);
+
+ return 0;
+}
+
+/* radeon_init is called via init_module at module load time, or via
+ * linux/init/main.c (this is not currently supported). */
+
+static int __init radeon_init(void)
+{
+ int retcode;
+ drm_device_t *dev = &radeon_device;
+
+ DRM_DEBUG("\n");
+
+ memset((void *)dev, 0, sizeof(*dev));
+ dev->count_lock = SPIN_LOCK_UNLOCKED;
+ sema_init(&dev->struct_sem, 1);
+
+#ifdef MODULE
+ drm_parse_options(radeon);
+#endif
+
+ if ((retcode = misc_register(&radeon_misc))) {
+ DRM_ERROR("Cannot register \"%s\"\n", RADEON_NAME);
+ return retcode;
+ }
+ dev->device = MKDEV(MISC_MAJOR, radeon_misc.minor);
+ dev->name = RADEON_NAME;
+
+ drm_mem_init();
+ drm_proc_init(dev);
+
+#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
+ dev->agp = drm_agp_init();
+ if (dev->agp == NULL) {
+ DRM_ERROR("Cannot initialize agpgart module.\n");
+ drm_proc_cleanup();
+ misc_deregister(&radeon_misc);
+ radeon_takedown(dev);
+ return -ENOMEM;
+ }
+
+#ifdef CONFIG_MTRR
+ dev->agp->agp_mtrr = mtrr_add(dev->agp->agp_info.aper_base,
+ dev->agp->agp_info.aper_size*1024*1024,
+ MTRR_TYPE_WRCOMB,
+ 1);
+#endif
+#endif
+
+ if((retcode = drm_ctxbitmap_init(dev))) {
+ DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+ drm_proc_cleanup();
+ misc_deregister(&radeon_misc);
+ radeon_takedown(dev);
+ return retcode;
+ }
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ RADEON_NAME,
+ RADEON_MAJOR,
+ RADEON_MINOR,
+ RADEON_PATCHLEVEL,
+ RADEON_DATE,
+ radeon_misc.minor);
+
+ return 0;
+}
+
+/* radeon_cleanup is called via cleanup_module at module unload time. */
+
+static void __exit radeon_cleanup(void)
+{
+ drm_device_t *dev = &radeon_device;
+
+ DRM_DEBUG("\n");
+
+ drm_proc_cleanup();
+ if (misc_deregister(&radeon_misc)) {
+ DRM_ERROR("Cannot unload module\n");
+ } else {
+ DRM_INFO("Module unloaded\n");
+ }
+ drm_ctxbitmap_cleanup(dev);
+ radeon_takedown(dev);
+#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
+ if (dev->agp) {
+ drm_agp_uninit();
+ drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
+ dev->agp = NULL;
+ }
+#endif
+}
+
+module_init(radeon_init);
+module_exit(radeon_cleanup);
+
+
+int radeon_version(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_version_t version;
+ int len;
+
+ if (copy_from_user(&version,
+ (drm_version_t *)arg,
+ sizeof(version)))
+ return -EFAULT;
+
+#define DRM_COPY(name,value) \
+ len = strlen(value); \
+ if (len > name##_len) len = name##_len; \
+ name##_len = strlen(value); \
+ if (len && name) { \
+ if (copy_to_user(name, value, len)) \
+ return -EFAULT; \
+ }
+
+ version.version_major = RADEON_MAJOR;
+ version.version_minor = RADEON_MINOR;
+ version.version_patchlevel = RADEON_PATCHLEVEL;
+
+ DRM_COPY(version.name, RADEON_NAME);
+ DRM_COPY(version.date, RADEON_DATE);
+ DRM_COPY(version.desc, RADEON_DESC);
+
+ if (copy_to_user((drm_version_t *)arg,
+ &version,
+ sizeof(version)))
+ return -EFAULT;
+ return 0;
+}
+
+int radeon_open(struct inode *inode, struct file *filp)
+{
+ drm_device_t *dev = &radeon_device;
+ int retcode = 0;
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+ if (!(retcode = drm_open_helper(inode, filp, dev))) {
+#if LINUX_VERSION_CODE < 0x020333
+ MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
+#endif
+ atomic_inc(&dev->total_open);
+ spin_lock(&dev->count_lock);
+ if (!dev->open_count++) {
+ spin_unlock(&dev->count_lock);
+ return radeon_setup(dev);
+ }
+ spin_unlock(&dev->count_lock);
+ }
+
+ return retcode;
+}
+
+int radeon_release(struct inode *inode, struct file *filp)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev;
+ int retcode = 0;
+
+ lock_kernel();
+ dev = priv->dev;
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+
+ /* Force the cleanup of page flipping when required */
+ if ( dev->dev_private ) {
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ if ( dev_priv->page_flipping ) {
+ radeon_do_cleanup_pageflip( dev );
+ }
+ }
+
+ if (!(retcode = drm_release(inode, filp))) {
+#if LINUX_VERSION_CODE < 0x020333
+ MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
+#endif
+ atomic_inc(&dev->total_close);
+ spin_lock(&dev->count_lock);
+ if (!--dev->open_count) {
+ if (atomic_read(&dev->ioctl_count) || dev->blocked) {
+ DRM_ERROR("Device busy: %d %d\n",
+ atomic_read(&dev->ioctl_count),
+ dev->blocked);
+ spin_unlock(&dev->count_lock);
+ unlock_kernel();
+ return -EBUSY;
+ }
+ spin_unlock(&dev->count_lock);
+ unlock_kernel();
+ return radeon_takedown(dev);
+ }
+ spin_unlock(&dev->count_lock);
+ }
+
+ unlock_kernel();
+ return retcode;
+}
+
+/* radeon_ioctl is called whenever a process performs an ioctl on /dev/drm. */
+
+int radeon_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int nr = DRM_IOCTL_NR(cmd);
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ int retcode = 0;
+ drm_ioctl_desc_t *ioctl;
+ drm_ioctl_t *func;
+
+ atomic_inc(&dev->ioctl_count);
+ atomic_inc(&dev->total_ioctl);
+ ++priv->ioctl_count;
+
+ DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
+ current->pid, cmd, nr, dev->device, priv->authenticated);
+
+ if (nr >= RADEON_IOCTL_COUNT) {
+ retcode = -EINVAL;
+ } else {
+ ioctl = &radeon_ioctls[nr];
+ func = ioctl->func;
+
+ if (!func) {
+ DRM_DEBUG("no function\n");
+ retcode = -EINVAL;
+ } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
+ || (ioctl->auth_needed && !priv->authenticated)) {
+ retcode = -EACCES;
+ } else {
+ retcode = (func)(inode, filp, cmd, arg);
+ }
+ }
+
+ atomic_dec(&dev->ioctl_count);
+ return retcode;
+}
+
+int radeon_lock(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ DECLARE_WAITQUEUE(entry, current);
+ int ret = 0;
+ drm_lock_t lock;
+#if DRM_DMA_HISTOGRAM
+ cycles_t start;
+
+ dev->lck_start = start = get_cycles();
+#endif
+
+ if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
+ return -EFAULT;
+
+ if (lock.context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ current->pid, lock.context);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
+ lock.context, current->pid, dev->lock.hw_lock->lock,
+ lock.flags);
+
+ if (lock.context < 0 /* || lock.context >= dev->queue_count */)
+ return -EINVAL;
+
+ if (!ret) {
+ add_wait_queue(&dev->lock.lock_queue, &entry);
+ for (;;) {
+ current->state = TASK_INTERRUPTIBLE;
+ if (!dev->lock.hw_lock) {
+ /* Device has been unregistered */
+ ret = -EINTR;
+ break;
+ }
+ if (drm_lock_take(&dev->lock.hw_lock->lock,
+ lock.context)) {
+ dev->lock.pid = current->pid;
+ dev->lock.lock_time = jiffies;
+ atomic_inc(&dev->total_locks);
+ break; /* Got lock */
+ }
+
+ /* Contention */
+ atomic_inc(&dev->total_sleeps);
+ schedule();
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&dev->lock.lock_queue, &entry);
+ }
+
+ if (!ret) {
+ sigemptyset(&dev->sigmask);
+ sigaddset(&dev->sigmask, SIGSTOP);
+ sigaddset(&dev->sigmask, SIGTSTP);
+ sigaddset(&dev->sigmask, SIGTTIN);
+ sigaddset(&dev->sigmask, SIGTTOU);
+ dev->sigdata.context = lock.context;
+ dev->sigdata.lock = dev->lock.hw_lock;
+ block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
+ if (lock.flags & _DRM_LOCK_READY) {
+ /* Wait for space in DMA/FIFO */
+ }
+ if (lock.flags & _DRM_LOCK_QUIESCENT) {
+ /* Make hardware quiescent */
+ DRM_DEBUG("not quiescent!\n");
+#if 0
+ radeon_quiescent(dev);
+#endif
+ }
+ }
+
+#if LINUX_VERSION_CODE < 0x020400
+ if (lock.context != radeon_res_ctx.handle) {
+ current->counter = 5;
+ current->priority = DEF_PRIORITY/4;
+ }
+#endif
+ DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
+
+#if DRM_DMA_HISTOGRAM
+ atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]);
+#endif
+
+ return ret;
+}
+
+
+int radeon_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_lock_t lock;
+
+ if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
+ return -EFAULT;
+
+ if (lock.context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ current->pid, lock.context);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("%d frees lock (%d holds)\n",
+ lock.context,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+ atomic_inc(&dev->total_unlocks);
+ if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
+ atomic_inc(&dev->total_contends);
+ drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
+ /* FIXME: Try to send data to card here */
+ if (!dev->context_flag) {
+ if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ DRM_ERROR("\n");
+ }
+ }
+
+#if LINUX_VERSION_CODE < 0x020400
+ if (lock.context != radeon_res_ctx.handle) {
+ current->counter = 5;
+ current->priority = DEF_PRIORITY;
+ }
+#endif
+ unblock_all_signals();
+ return 0;
+}
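
radeon_lock() above sleeps on lock_queue and retries drm_lock_take(), which tries to atomically mark a shared lock word as held by the requesting context; radeon_unlock() hands the lock back to the kernel context and frees it. A rough userspace sketch of that take/free idea using C11 atomics follows; the held-bit layout and all names here are assumptions for illustration, not the actual DRM hardware-lock encoding.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative "lock word = held-bit | context id" take/free, loosely
 * modelled on what drm_lock_take()/drm_lock_free() do for the hardware
 * lock polled by radeon_lock(). Names and layout are assumptions. */
#define LOCK_HELD 0x80000000u

static _Atomic unsigned int hw_lock = 0;   /* 0 means free */

static int lock_take(unsigned int context)
{
	unsigned int expected = 0;
	/* Succeeds only if nobody holds the lock right now. */
	return atomic_compare_exchange_strong(&hw_lock, &expected,
					      LOCK_HELD | context);
}

static void lock_free(unsigned int context)
{
	unsigned int expected = LOCK_HELD | context;
	/* Release only if we are the recorded holder. */
	atomic_compare_exchange_strong(&hw_lock, &expected, 0);
}

int main(void)
{
	printf("take ctx 2: %d\n", lock_take(2));   /* 1 */
	printf("take ctx 3: %d\n", lock_take(3));   /* 0: contended */
	lock_free(2);
	printf("take ctx 3: %d\n", lock_take(3));   /* 1 */
	return 0;
}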
diff --git a/linux/radeon_bufs.c b/linux/radeon_bufs.c
new file mode 100644
index 000000000..9a3093eb1
--- /dev/null
+++ b/linux/radeon_bufs.c
@@ -0,0 +1,298 @@
+/* radeon_bufs.c -- IOCTLs to manage buffers -*- linux-c -*-
+ *
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Kevin E. Martin <martin@valinux.com>
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Jeff Hartmann <jhartmann@valinux.com>
+ *
+ */
+
+#define __NO_VERSION__
+#include <linux/config.h>
+#include "drmP.h"
+#include "radeon_drv.h"
+#include "linux/un.h"
+
+
+#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
+int radeon_addbufs_agp(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t request;
+ drm_buf_entry_t *entry;
+ drm_buf_t *buf;
+ unsigned long offset;
+ unsigned long agp_offset;
+ int count;
+ int order;
+ int size;
+ int alignment;
+ int page_order;
+ int total;
+ int byte_count;
+ int i;
+
+ if (!dma) return -EINVAL;
+
+ if (copy_from_user(&request, (drm_buf_desc_t *)arg, sizeof(request)))
+ return -EFAULT;
+
+ count = request.count;
+ order = drm_order(request.size);
+ size = 1 << order;
+
+ alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size):size;
+ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
+
+ byte_count = 0;
+ agp_offset = dev->agp->base + request.agp_start;
+
+ DRM_DEBUG("count: %d\n", count);
+ DRM_DEBUG("order: %d\n", order);
+ DRM_DEBUG("size: %d\n", size);
+ DRM_DEBUG("agp_offset: %ld\n", agp_offset);
+ DRM_DEBUG("alignment: %d\n", alignment);
+ DRM_DEBUG("page_order: %d\n", page_order);
+ DRM_DEBUG("total: %d\n", total);
+
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
+ if (dev->queue_count) return -EBUSY; /* Not while in use */
+
+ spin_lock(&dev->count_lock);
+ if (dev->buf_use) {
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ atomic_inc(&dev->buf_alloc);
+ spin_unlock(&dev->count_lock);
+
+ down(&dev->struct_sem);
+ entry = &dma->bufs[order];
+ if (entry->buf_count) {
+ up(&dev->struct_sem);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM; /* May only call once for each order */
+ }
+
+ entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS);
+ if (!entry->buflist) {
+ up(&dev->struct_sem);
+ atomic_dec(&dev->buf_alloc);
+ return -ENOMEM;
+ }
+ memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+ entry->buf_size = size;
+ entry->page_order = page_order;
+ offset = 0;
+
+ for (offset = 0;
+ entry->buf_count < count;
+ offset += alignment, ++entry->buf_count) {
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
+ buf->offset = (dma->byte_count + offset);
+ buf->address = (void *)(agp_offset + offset);
+ buf->next = NULL;
+ buf->waiting = 0;
+ buf->pending = 0;
+ init_waitqueue_head(&buf->dma_wait);
+ buf->pid = 0;
+
+ buf->dev_priv_size = sizeof(drm_radeon_buf_priv_t);
+ buf->dev_private = drm_alloc(sizeof(drm_radeon_buf_priv_t),
+ DRM_MEM_BUFS);
+ memset(buf->dev_private, 0, buf->dev_priv_size);
+
+#if DRM_DMA_HISTOGRAM
+ buf->time_queued = 0;
+ buf->time_dispatched = 0;
+ buf->time_completed = 0;
+ buf->time_freed = 0;
+#endif
+
+ byte_count += PAGE_SIZE << page_order;
+
+ DRM_DEBUG("buffer %d @ %p\n",
+ entry->buf_count, buf->address);
+ }
+
+ DRM_DEBUG("byte_count: %d\n", byte_count);
+
+ dma->buflist = drm_realloc(dma->buflist,
+ dma->buf_count * sizeof(*dma->buflist),
+ (dma->buf_count + entry->buf_count)
+ * sizeof(*dma->buflist),
+ DRM_MEM_BUFS);
+ for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
+ dma->buflist[i] = &entry->buflist[i - dma->buf_count];
+
+ dma->buf_count += entry->buf_count;
+ dma->byte_count += byte_count;
+
+ drm_freelist_create(&entry->freelist, entry->buf_count);
+ for (i = 0; i < entry->buf_count; i++) {
+ drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
+ }
+
+ up(&dev->struct_sem);
+
+ request.count = entry->buf_count;
+ request.size = size;
+
+ if (copy_to_user((drm_buf_desc_t *)arg, &request, sizeof(request)))
+ return -EFAULT;
+
+ dma->flags = _DRM_DMA_USE_AGP;
+
+ atomic_dec(&dev->buf_alloc);
+ return 0;
+}
+#endif
+
+int radeon_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_buf_desc_t request;
+
+ if (!dev_priv || dev_priv->is_pci) return -EINVAL;
+
+ if (copy_from_user(&request, (drm_buf_desc_t *)arg, sizeof(request)))
+ return -EFAULT;
+
+#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
+ if (request.flags & _DRM_AGP_BUFFER)
+ return radeon_addbufs_agp(inode, filp, cmd, arg);
+ else
+#endif
+ return -EINVAL;
+}
+
+int radeon_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+ int retcode = 0;
+ const int zero = 0;
+ unsigned long virtual;
+ unsigned long address;
+ drm_buf_map_t request;
+ int i;
+
+ if (!dma || !dev_priv || dev_priv->is_pci) return -EINVAL;
+
+ DRM_DEBUG("\n");
+
+ spin_lock(&dev->count_lock);
+ if (atomic_read(&dev->buf_alloc)) {
+ spin_unlock(&dev->count_lock);
+ return -EBUSY;
+ }
+ ++dev->buf_use; /* Can't allocate more after this call */
+ spin_unlock(&dev->count_lock);
+
+ if (copy_from_user(&request, (drm_buf_map_t *)arg, sizeof(request)))
+ return -EFAULT;
+
+ if (request.count >= dma->buf_count) {
+ if (dma->flags & _DRM_DMA_USE_AGP) {
+ drm_map_t *map;
+
+ map = dev_priv->buffers;
+ if (!map) {
+ retcode = -EINVAL;
+ goto done;
+ }
+
+ down(&current->mm->mmap_sem);
+ virtual = do_mmap(filp, 0, map->size,
+ PROT_READ|PROT_WRITE,
+ MAP_SHARED,
+ (unsigned long)map->offset);
+ up(&current->mm->mmap_sem);
+ } else {
+ down(&current->mm->mmap_sem);
+ virtual = do_mmap(filp, 0, dma->byte_count,
+ PROT_READ|PROT_WRITE, MAP_SHARED, 0);
+ up(&current->mm->mmap_sem);
+ }
+ if (virtual > -1024UL) {
+ /* Real error */
+ retcode = (signed long)virtual;
+ goto done;
+ }
+ request.virtual = (void *)virtual;
+
+ for (i = 0; i < dma->buf_count; i++) {
+ if (copy_to_user(&request.list[i].idx,
+ &dma->buflist[i]->idx,
+ sizeof(request.list[0].idx))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ if (copy_to_user(&request.list[i].total,
+ &dma->buflist[i]->total,
+ sizeof(request.list[0].total))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ if (copy_to_user(&request.list[i].used,
+ &zero,
+ sizeof(zero))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ address = virtual + dma->buflist[i]->offset;
+ if (copy_to_user(&request.list[i].address,
+ &address,
+ sizeof(address))) {
+ retcode = -EFAULT;
+ goto done;
+ }
+ }
+ }
+ done:
+ request.count = dma->buf_count;
+ DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
+
+ if (copy_to_user((drm_buf_map_t *)arg, &request, sizeof(request)))
+ return -EFAULT;
+
+ return retcode;
+}
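
radeon_mapbufs() above maps the whole buffer aperture once and then reports each DMA buffer to userspace as the mapping base plus that buffer's byte offset. A tiny sketch of that base-plus-offset bookkeeping follows; a plain array stands in for the mapped region, and all names are hypothetical.

#include <stdio.h>

/* After mapping one large region, each buffer's user address is just
 * base + offset -- the bookkeeping radeon_mapbufs() copies out to
 * userspace. The array below stands in for the mmap()ed aperture. */
struct buf { unsigned long offset; unsigned long total; };

int main(void)
{
	static unsigned char aperture[3 * 4096];      /* stand-in mapping */
	struct buf bufs[] = { {0, 4096}, {4096, 4096}, {8192, 4096} };
	unsigned long virtual = (unsigned long)aperture;

	for (unsigned i = 0; i < sizeof(bufs)/sizeof(bufs[0]); i++)
		printf("buf %u @ %#lx (size %lu)\n",
		       i, virtual + bufs[i].offset, bufs[i].total);
	return 0;
}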
diff --git a/linux/radeon_context.c b/linux/radeon_context.c
new file mode 100644
index 000000000..e428dc22c
--- /dev/null
+++ b/linux/radeon_context.c
@@ -0,0 +1,215 @@
+/* radeon_context.c -- IOCTLs for Radeon contexts -*- linux-c -*-
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Kevin E. Martin <martin@valinux.com>
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include "radeon_drv.h"
+
+extern drm_ctx_t radeon_res_ctx;
+
+static int radeon_alloc_queue(drm_device_t *dev)
+{
+ return drm_ctxbitmap_next(dev);
+}
+
+int radeon_context_switch(drm_device_t *dev, int old, int new)
+{
+ char buf[64];
+
+ atomic_inc(&dev->total_ctx);
+
+ if (test_and_set_bit(0, &dev->context_flag)) {
+ DRM_ERROR("Reentering -- FIXME\n");
+ return -EBUSY;
+ }
+
+#if DRM_DMA_HISTOGRAM
+ dev->ctx_start = get_cycles();
+#endif
+
+ DRM_DEBUG("Context switch from %d to %d\n", old, new);
+
+ if (new == dev->last_context) {
+ clear_bit(0, &dev->context_flag);
+ return 0;
+ }
+
+ if (drm_flags & DRM_FLAG_NOCTX) {
+ radeon_context_switch_complete(dev, new);
+ } else {
+ sprintf(buf, "C %d %d\n", old, new);
+ drm_write_string(dev, buf);
+ }
+
+ return 0;
+}
+
+int radeon_context_switch_complete(drm_device_t *dev, int new)
+{
+ dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
+ dev->last_switch = jiffies;
+
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("Lock isn't held after context switch\n");
+ }
+
+ /* If a context switch is ever initiated
+ when the kernel holds the lock, release
+ that lock here. */
+#if DRM_DMA_HISTOGRAM
+ atomic_inc(&dev->histo.ctx[drm_histogram_slot(get_cycles()
+ - dev->ctx_start)]);
+
+#endif
+ clear_bit(0, &dev->context_flag);
+ wake_up(&dev->context_wait);
+
+ return 0;
+}
+
+
+int radeon_resctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_ctx_res_t res;
+ drm_ctx_t ctx;
+ int i;
+
+ DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
+ if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res)))
+ return -EFAULT;
+ if (res.count >= DRM_RESERVED_CONTEXTS) {
+ memset(&ctx, 0, sizeof(ctx));
+ for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
+ ctx.handle = i;
+ if (copy_to_user(&res.contexts[i], &i, sizeof(i)))
+ return -EFAULT;
+ }
+ }
+ res.count = DRM_RESERVED_CONTEXTS;
+ if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res)))
+ return -EFAULT;
+ return 0;
+}
+
+
+int radeon_addctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_ctx_t ctx;
+
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
+ return -EFAULT;
+ if ((ctx.handle = radeon_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
+ /* Skip kernel's context and get a new one. */
+ ctx.handle = radeon_alloc_queue(dev);
+ }
+ DRM_DEBUG("%d\n", ctx.handle);
+ if (ctx.handle == -1) {
+ DRM_DEBUG("Not enough free contexts.\n");
+ /* Should this return -EBUSY instead? */
+ return -ENOMEM;
+ }
+
+ if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
+ return -EFAULT;
+ return 0;
+}
+
+int radeon_modctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_ctx_t ctx;
+
+ if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx)))
+ return -EFAULT;
+ if (ctx.flags==_DRM_CONTEXT_PRESERVED)
+ radeon_res_ctx.handle=ctx.handle;
+ return 0;
+}
+
+int radeon_getctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_ctx_t ctx;
+
+ if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx)))
+ return -EFAULT;
+	/* This is 0, because we don't handle any context flags */
+ ctx.flags = 0;
+ if (copy_to_user((drm_ctx_t*)arg, &ctx, sizeof(ctx)))
+ return -EFAULT;
+ return 0;
+}
+
+int radeon_switchctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_ctx_t ctx;
+
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
+ return -EFAULT;
+ DRM_DEBUG("%d\n", ctx.handle);
+ return radeon_context_switch(dev, dev->last_context, ctx.handle);
+}
+
+int radeon_newctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_ctx_t ctx;
+
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
+ return -EFAULT;
+ DRM_DEBUG("%d\n", ctx.handle);
+ radeon_context_switch_complete(dev, ctx.handle);
+
+ return 0;
+}
+
+int radeon_rmctx(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_ctx_t ctx;
+
+ if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
+ return -EFAULT;
+ DRM_DEBUG("%d\n", ctx.handle);
+ drm_ctxbitmap_free(dev, ctx.handle);
+
+ return 0;
+}
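
radeon_addctx() above pulls a handle from the DRM context bitmap and, if it lands on the reserved kernel context (handle 0), simply allocates again. A minimal array-backed allocator sketch of that skip-slot-zero idea follows; the names are illustrative, not the drm_ctxbitmap API.

#include <stdio.h>

#define KERNEL_CONTEXT 0
#define MAX_CONTEXTS   32

/* Toy stand-in for drm_ctxbitmap_next(): hand out the lowest free slot. */
static int ctx_used[MAX_CONTEXTS];

static int ctx_alloc(void)
{
	for (int i = 0; i < MAX_CONTEXTS; i++)
		if (!ctx_used[i]) { ctx_used[i] = 1; return i; }
	return -1;                              /* no free contexts */
}

int main(void)
{
	int handle = ctx_alloc();
	if (handle == KERNEL_CONTEXT)           /* skip the kernel's context, */
		handle = ctx_alloc();           /* as radeon_addctx() does    */
	printf("allocated context %d\n", handle);
	return 0;
}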
diff --git a/linux/radeon_cp.c b/linux/radeon_cp.c
index ed8b1bbca..5d662bc08 100644
--- a/linux/radeon_cp.c
+++ b/linux/radeon_cp.c
@@ -24,12 +24,12 @@
* DEALINGS IN THE SOFTWARE.
*
* Authors:
- * Kevin E. Martin <martin@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
+ * Kevin E. Martin <martin@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
*/
#define __NO_VERSION__
-#include "radeon.h"
#include "drmP.h"
#include "radeon_drv.h"
@@ -300,6 +300,26 @@ static u32 radeon_cp_microcode[][2] = {
};
+#define DO_IOREMAP(_m) (_m)->handle = drm_ioremap((_m)->offset, (_m)->size)
+
+#define DO_IOREMAPFREE(_m) \
+ do { \
+ if ((_m)->handle && (_m)->size) \
+ drm_ioremapfree((_m)->handle, (_m)->size); \
+ } while (0)
+
+#define DO_FIND_MAP(_m, _o) \
+ do { \
+ int _i; \
+ for (_i = 0; _i < dev->map_count; _i++) { \
+ if (dev->maplist[_i]->offset == _o) { \
+ _m = dev->maplist[_i]; \
+ break; \
+ } \
+ } \
+ } while (0)
+
+
int RADEON_READ_PLL(drm_device_t *dev, int addr)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -601,9 +621,9 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev )
static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
{
drm_radeon_private_t *dev_priv;
- struct list_head *list;
+ int i;
- dev_priv = DRM(alloc)( sizeof(drm_radeon_private_t), DRM_MEM_DRIVER );
+ dev_priv = drm_alloc( sizeof(drm_radeon_private_t), DRM_MEM_DRIVER );
if ( dev_priv == NULL )
return -ENOMEM;
dev->dev_private = (void *)dev_priv;
@@ -617,7 +637,7 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
* the CP ring code.
*/
if ( dev_priv->is_pci ) {
- DRM(free)( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER );
+ drm_free( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER );
dev->dev_private = NULL;
return -EINVAL;
}
@@ -625,7 +645,7 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
dev_priv->usec_timeout = init->usec_timeout;
if ( dev_priv->usec_timeout < 1 ||
dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT ) {
- DRM(free)( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER );
+ drm_free( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER );
dev->dev_private = NULL;
return -EINVAL;
}
@@ -642,7 +662,7 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
*/
if ( ( init->cp_mode != RADEON_CSQ_PRIBM_INDDIS ) &&
( init->cp_mode != RADEON_CSQ_PRIBM_INDBM ) ) {
- DRM(free)( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER );
+ drm_free( dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER );
dev->dev_private = NULL;
return -EINVAL;
}
@@ -702,42 +722,49 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
RADEON_BFACE_SOLID |
RADEON_FFACE_SOLID |
RADEON_FLAT_SHADE_VTX_LAST |
+
RADEON_DIFFUSE_SHADE_FLAT |
RADEON_ALPHA_SHADE_FLAT |
RADEON_SPECULAR_SHADE_FLAT |
RADEON_FOG_SHADE_FLAT |
+
RADEON_VTX_PIX_CENTER_OGL |
RADEON_ROUND_MODE_TRUNC |
RADEON_ROUND_PREC_8TH_PIX);
- list_for_each(list, &dev->maplist->head) {
- drm_map_list_t *r_list = (drm_map_list_t *)list;
- if( r_list->map &&
- r_list->map->type == _DRM_SHM &&
- r_list->map->flags & _DRM_CONTAINS_LOCK ) {
- dev_priv->sarea = r_list->map;
- break;
- }
- }
-
- DRM_FIND_MAP( dev_priv->fb, init->fb_offset );
- DRM_FIND_MAP( dev_priv->mmio, init->mmio_offset );
- DRM_FIND_MAP( dev_priv->cp_ring, init->ring_offset );
- DRM_FIND_MAP( dev_priv->ring_rptr, init->ring_rptr_offset );
- DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );
+ /* FIXME: We want multiple shared areas, including one shared
+ * only by the X Server and kernel module.
+ */
+ for ( i = 0 ; i < dev->map_count ; i++ ) {
+ if ( dev->maplist[i]->type == _DRM_SHM ) {
+ dev_priv->sarea = dev->maplist[i];
+ break;
+ }
+ }
+
+ DO_FIND_MAP( dev_priv->fb, init->fb_offset );
+ DO_FIND_MAP( dev_priv->mmio, init->mmio_offset );
+ DO_FIND_MAP( dev_priv->cp_ring, init->ring_offset );
+ DO_FIND_MAP( dev_priv->ring_rptr, init->ring_rptr_offset );
+ DO_FIND_MAP( dev_priv->buffers, init->buffers_offset );
if ( !dev_priv->is_pci ) {
- DRM_FIND_MAP( dev_priv->agp_textures,
- init->agp_textures_offset );
+ DO_FIND_MAP( dev_priv->agp_textures,
+ init->agp_textures_offset );
}
dev_priv->sarea_priv =
(drm_radeon_sarea_t *)((u8 *)dev_priv->sarea->handle +
init->sarea_priv_offset);
- DRM_IOREMAP( dev_priv->cp_ring );
- DRM_IOREMAP( dev_priv->ring_rptr );
- DRM_IOREMAP( dev_priv->buffers );
+ DO_IOREMAP( dev_priv->cp_ring );
+ DO_IOREMAP( dev_priv->ring_rptr );
+ DO_IOREMAP( dev_priv->buffers );
+#if 0
+ if ( !dev_priv->is_pci ) {
+ DO_IOREMAP( dev_priv->agp_textures );
+ }
+#endif
dev_priv->agp_size = init->agp_size;
dev_priv->agp_vm_start = RADEON_READ( RADEON_CONFIG_APER_SIZE );
@@ -752,13 +779,11 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
dev_priv->ring.end = ((u32 *)dev_priv->cp_ring->handle
+ init->ring_size / sizeof(u32));
dev_priv->ring.size = init->ring_size;
- dev_priv->ring.size_l2qw = DRM(order)( init->ring_size / 8 );
+ dev_priv->ring.size_l2qw = drm_order( init->ring_size / 8 );
dev_priv->ring.tail_mask =
(dev_priv->ring.size / sizeof(u32)) - 1;
- dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
-
#if 0
/* Initialize the scratch register pointer. This will cause
* the scratch register values to be written out to memory
@@ -798,17 +823,22 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
return 0;
}
-int radeon_do_cleanup_cp( drm_device_t *dev )
+static int radeon_do_cleanup_cp( drm_device_t *dev )
{
if ( dev->dev_private ) {
drm_radeon_private_t *dev_priv = dev->dev_private;
- DRM_IOREMAPFREE( dev_priv->cp_ring );
- DRM_IOREMAPFREE( dev_priv->ring_rptr );
- DRM_IOREMAPFREE( dev_priv->buffers );
+ DO_IOREMAPFREE( dev_priv->cp_ring );
+ DO_IOREMAPFREE( dev_priv->ring_rptr );
+ DO_IOREMAPFREE( dev_priv->buffers );
+#if 0
+ if ( !dev_priv->is_pci ) {
+ DO_IOREMAPFREE( dev_priv->agp_textures );
+ }
+#endif
- DRM(free)( dev->dev_private, sizeof(drm_radeon_private_t),
- DRM_MEM_DRIVER );
+ drm_free( dev->dev_private, sizeof(drm_radeon_private_t),
+ DRM_MEM_DRIVER );
dev->dev_private = NULL;
}
@@ -843,8 +873,11 @@ int radeon_cp_start( struct inode *inode, struct file *filp,
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG( "%s\n", __FUNCTION__ );
- LOCK_TEST_WITH_RETURN( dev );
-
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
if ( dev_priv->cp_running ) {
DRM_DEBUG( "%s while CP running\n", __FUNCTION__ );
return 0;
@@ -873,7 +906,11 @@ int radeon_cp_stop( struct inode *inode, struct file *filp,
int ret;
DRM_DEBUG( "%s\n", __FUNCTION__ );
- LOCK_TEST_WITH_RETURN( dev );
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
if ( copy_from_user( &stop, (drm_radeon_init_t *)arg, sizeof(stop) ) )
return -EFAULT;
@@ -915,8 +952,11 @@ int radeon_cp_reset( struct inode *inode, struct file *filp,
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG( "%s\n", __FUNCTION__ );
- LOCK_TEST_WITH_RETURN( dev );
-
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
if ( !dev_priv ) {
DRM_DEBUG( "%s called before init done\n", __FUNCTION__ );
return -EINVAL;
@@ -938,7 +978,11 @@ int radeon_cp_idle( struct inode *inode, struct file *filp,
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG( "%s\n", __FUNCTION__ );
- LOCK_TEST_WITH_RETURN( dev );
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
return radeon_do_cp_idle( dev_priv );
}
@@ -950,7 +994,11 @@ int radeon_engine_reset( struct inode *inode, struct file *filp,
drm_device_t *dev = priv->dev;
DRM_DEBUG( "%s\n", __FUNCTION__ );
- LOCK_TEST_WITH_RETURN( dev );
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
return radeon_do_engine_reset( dev );
}
@@ -1000,7 +1048,11 @@ int radeon_fullscreen( struct inode *inode, struct file *filp,
drm_device_t *dev = priv->dev;
drm_radeon_fullscreen_t fs;
- LOCK_TEST_WITH_RETURN( dev );
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
if ( copy_from_user( &fs, (drm_radeon_fullscreen_t *)arg,
sizeof(fs) ) )
@@ -1033,8 +1085,8 @@ static int radeon_freelist_init( drm_device_t *dev )
drm_radeon_freelist_t *entry;
int i;
- dev_priv->head = DRM(alloc)( sizeof(drm_radeon_freelist_t),
- DRM_MEM_DRIVER );
+ dev_priv->head = drm_alloc( sizeof(drm_radeon_freelist_t),
+ DRM_MEM_DRIVER );
if ( dev_priv->head == NULL )
return -ENOMEM;
@@ -1045,8 +1097,8 @@ static int radeon_freelist_init( drm_device_t *dev )
buf = dma->buflist[i];
buf_priv = buf->dev_private;
- entry = DRM(alloc)( sizeof(drm_radeon_freelist_t),
- DRM_MEM_DRIVER );
+ entry = drm_alloc( sizeof(drm_radeon_freelist_t),
+ DRM_MEM_DRIVER );
if ( !entry ) return -ENOMEM;
entry->age = RADEON_BUFFER_FREE;
@@ -1166,9 +1218,13 @@ int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n )
int i;
for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
- radeon_update_ring_snapshot( ring );
- if ( ring->space > n )
+ ring->space = *ring->head - ring->tail;
+ if ( ring->space <= 0 )
+ ring->space += ring->size;
+
+ if ( ring->space >= n )
return 0;
+
udelay( 1 );
}
@@ -1177,6 +1233,17 @@ int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n )
return -EBUSY;
}
+void radeon_update_ring_snapshot( drm_radeon_private_t *dev_priv )
+{
+ drm_radeon_ring_buffer_t *ring = &dev_priv->ring;
+
+ ring->space = *ring->head - ring->tail;
+ if ( ring->space == 0 )
+ atomic_inc( &dev_priv->idle_count );
+ if ( ring->space <= 0 )
+ ring->space += ring->size;
+}
+
static int radeon_cp_get_buffers( drm_device_t *dev, drm_dma_t *d )
{
int i;
@@ -1209,11 +1276,15 @@ int radeon_cp_buffers( struct inode *inode, struct file *filp,
int ret = 0;
drm_dma_t d;
- LOCK_TEST_WITH_RETURN( dev );
-
- if ( copy_from_user( &d, (drm_dma_t *)arg, sizeof(d) ) )
+ if ( copy_from_user( &d, (drm_dma_t *) arg, sizeof(d) ) )
return -EFAULT;
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
+
/* Please don't send us buffers.
*/
if ( d.send_count != 0 ) {
@@ -1236,7 +1307,7 @@ int radeon_cp_buffers( struct inode *inode, struct file *filp,
ret = radeon_cp_get_buffers( dev, &d );
}
- if ( copy_to_user( (drm_dma_t *)arg, &d, sizeof(d) ) )
+ if ( copy_to_user( (drm_dma_t *) arg, &d, sizeof(d) ) )
return -EFAULT;
return ret;
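
The radeon_wait_ring() change above inlines the ring free-space arithmetic: space is head minus tail, wrapped by the ring size whenever it goes non-positive, and the loop polls with udelay() until at least n units are free. A standalone sketch of just that arithmetic follows; the units and values are an illustrative assumption.

#include <stdio.h>

/* Free space in a circular ring, given the consumer's head index and the
 * producer's tail index -- the same wrap-around arithmetic radeon_wait_ring()
 * uses above. Units (entries vs. bytes) are an illustrative assumption. */
static int ring_space(int head, int tail, int size)
{
	int space = head - tail;
	if (space <= 0)
		space += size;
	return space;
}

int main(void)
{
	/* 64-entry ring: producer at 60, consumer at 10 -> 14 entries free. */
	printf("%d\n", ring_space(10, 60, 64));
	/* Producer just behind consumer -> nearly the whole ring is free. */
	printf("%d\n", ring_space(30, 31, 64));  /* 63 */
	return 0;
}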
diff --git a/linux/radeon_drm.h b/linux/radeon_drm.h
index 50a7d6ed8..c5f9f66d1 100644
--- a/linux/radeon_drm.h
+++ b/linux/radeon_drm.h
@@ -26,6 +26,7 @@
* Authors:
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
+ *
*/
#ifndef __RADEON_DRM_H__
@@ -73,7 +74,7 @@
/* Vertex/indirect buffer size
*/
-#define RADEON_BUFFER_SIZE 65536
+#define RADEON_BUFFER_SIZE 16384
/* Byte offsets for indirect buffer data
*/
@@ -275,18 +276,15 @@ typedef struct drm_radeon_fullscreen {
#define CLEAR_Y2 3
#define CLEAR_DEPTH 4
-typedef union drm_radeon_clear_rect {
- float f[5];
- unsigned int ui[5];
-} drm_radeon_clear_rect_t;
-
typedef struct drm_radeon_clear {
unsigned int flags;
+ int x, y, w, h;
unsigned int clear_color;
unsigned int clear_depth;
- unsigned int color_mask;
- unsigned int depth_mask;
- drm_radeon_clear_rect_t *depth_boxes;
+ union {
+ float f[5];
+ unsigned int ui[5];
+ } rect;
} drm_radeon_clear_t;
typedef struct drm_radeon_vertex {
@@ -304,20 +302,14 @@ typedef struct drm_radeon_indices {
int discard; /* Client finished with buffer? */
} drm_radeon_indices_t;
-typedef struct drm_radeon_tex_image {
- unsigned int x, y; /* Blit coordinates */
- unsigned int width, height;
- const void *data;
-} drm_radeon_tex_image_t;
-
-typedef struct drm_radeon_texture {
- int offset;
+typedef struct drm_radeon_blit {
+ int idx;
int pitch;
+ int offset;
int format;
- int width; /* Texture image coordinates */
- int height;
- drm_radeon_tex_image_t *image;
-} drm_radeon_texture_t;
+ unsigned short x, y;
+ unsigned short width, height;
+} drm_radeon_blit_t;
typedef struct drm_radeon_stipple {
unsigned int *mask;
diff --git a/linux/radeon_drv.c b/linux/radeon_drv.c
index dba2037f9..0113ed97c 100644
--- a/linux/radeon_drv.c
+++ b/linux/radeon_drv.c
@@ -1,7 +1,7 @@
/* radeon_drv.c -- ATI Radeon driver -*- linux-c -*-
- * Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
*
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -18,70 +18,685 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Kevin E. Martin <martin@valinux.com>
+ * Rickard E. (Rik) Faith <faith@valinux.com>
*
- * Authors:
- * Gareth Hughes <gareth@valinux.com>
*/
#include <linux/config.h>
-#include "radeon.h"
#include "drmP.h"
#include "radeon_drv.h"
-#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc."
-
-#define DRIVER_NAME "radeon"
-#define DRIVER_DESC "ATI Radeon"
-#define DRIVER_DATE "20010308"
-
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 1
-
-#define DRIVER_IOCTLS \
- [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_INIT)] = { radeon_cp_init, 1, 1 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_START)] = { radeon_cp_start, 1, 1 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_STOP)] = { radeon_cp_stop, 1, 1 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_RESET)] = { radeon_cp_reset, 1, 1 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_IDLE)] = { radeon_cp_idle, 1, 0 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_RESET)] = { radeon_engine_reset, 1, 0 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_FULLSCREEN)] = { radeon_fullscreen, 1, 0 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_SWAP)] = { radeon_cp_swap, 1, 0 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CLEAR)] = { radeon_cp_clear, 1, 0 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_VERTEX)] = { radeon_cp_vertex, 1, 0 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDICES)] = { radeon_cp_indices, 1, 0 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_TEXTURE)] = { radeon_cp_texture, 1, 0 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_STIPPLE)] = { radeon_cp_stipple, 1, 0 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDIRECT)] = { radeon_cp_indirect, 1, 1 },
+#define RADEON_NAME "radeon"
+#define RADEON_DESC "ATI Radeon"
+#define RADEON_DATE "20010105"
+#define RADEON_MAJOR 1
+#define RADEON_MINOR 0
+#define RADEON_PATCHLEVEL 0
+static drm_device_t radeon_device;
+drm_ctx_t radeon_res_ctx;
-#if 0
-/* GH: Count data sent to card via ring or vertex/indirect buffers.
+static struct file_operations radeon_fops = {
+#if LINUX_VERSION_CODE >= 0x020400
+ /* This started being used during 2.4.0-test */
+ owner: THIS_MODULE,
+#endif
+ open: radeon_open,
+ flush: drm_flush,
+ release: radeon_release,
+ ioctl: radeon_ioctl,
+ mmap: drm_mmap,
+ read: drm_read,
+ fasync: drm_fasync,
+ poll: drm_poll,
+};
+
+static struct miscdevice radeon_misc = {
+ minor: MISC_DYNAMIC_MINOR,
+ name: RADEON_NAME,
+ fops: &radeon_fops,
+};
+
+static drm_ioctl_desc_t radeon_ioctls[] = {
+ [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { radeon_version, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { radeon_addbufs, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { radeon_mapbufs, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { radeon_addctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { radeon_rmctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { radeon_modctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { radeon_getctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { radeon_switchctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { radeon_newctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { radeon_resctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { radeon_lock, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { radeon_unlock, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
+
+#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
+#endif
+
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_INIT)] = { radeon_cp_init, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_START)] = { radeon_cp_start, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_STOP)] = { radeon_cp_stop, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_RESET)] = { radeon_cp_reset, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_IDLE)] = { radeon_cp_idle, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_RESET)] = { radeon_engine_reset, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_FULLSCREEN)] = { radeon_fullscreen, 1, 0 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_SWAP)] = { radeon_cp_swap, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_CLEAR)] = { radeon_cp_clear, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_VERTEX)] = { radeon_cp_vertex, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDICES)] = { radeon_cp_indices, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_BLIT)] = { radeon_cp_blit, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_STIPPLE)] = { radeon_cp_stipple, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDIRECT)]= { radeon_cp_indirect,1, 1 },
+};
+#define RADEON_IOCTL_COUNT DRM_ARRAY_SIZE(radeon_ioctls)
+
+#ifdef MODULE
+static char *radeon = NULL;
+#endif
+
+MODULE_AUTHOR("VA Linux Systems, Inc.");
+MODULE_DESCRIPTION("radeon");
+MODULE_PARM(radeon, "s");
+
+#ifndef MODULE
+/* radeon_options is called by the kernel to parse command-line options
+ * passed via the boot-loader (e.g., LILO). It calls the insmod option
+ * routine, drm_parse_options.
*/
-#define __HAVE_COUNTERS 3
-#define __HAVE_COUNTER6 _DRM_STAT_IRQ
-#define __HAVE_COUNTER7 _DRM_STAT_PRIMARY
-#define __HAVE_COUNTER8 _DRM_STAT_SECONDARY
-#endif
-
-
-#include "drm_agpsupport.h"
-#include "drm_auth.h"
-#include "drm_bufs.h"
-#include "drm_context.h"
-#include "drm_dma.h"
-#include "drm_drawable.h"
-#include "drm_drv.h"
-#include "drm_fops.h"
-#include "drm_init.h"
-#include "drm_ioctl.h"
-#include "drm_lock.h"
-#include "drm_memory.h"
-#include "drm_proc.h"
-#include "drm_vm.h"
-#include "drm_stub.h"
+
+static int __init radeon_options(char *str)
+{
+ drm_parse_options(str);
+ return 1;
+}
+
+__setup("radeon=", radeon_options);
+#endif
+
+static int radeon_setup(drm_device_t *dev)
+{
+ int i;
+
+ atomic_set(&dev->ioctl_count, 0);
+ atomic_set(&dev->vma_count, 0);
+ dev->buf_use = 0;
+ atomic_set(&dev->buf_alloc, 0);
+
+ drm_dma_setup(dev);
+
+ atomic_set(&dev->total_open, 0);
+ atomic_set(&dev->total_close, 0);
+ atomic_set(&dev->total_ioctl, 0);
+ atomic_set(&dev->total_irq, 0);
+ atomic_set(&dev->total_ctx, 0);
+ atomic_set(&dev->total_locks, 0);
+ atomic_set(&dev->total_unlocks, 0);
+ atomic_set(&dev->total_contends, 0);
+ atomic_set(&dev->total_sleeps, 0);
+
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ dev->magiclist[i].head = NULL;
+ dev->magiclist[i].tail = NULL;
+ }
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ dev->vmalist = NULL;
+ dev->lock.hw_lock = NULL;
+ init_waitqueue_head(&dev->lock.lock_queue);
+ dev->queue_count = 0;
+ dev->queue_reserved = 0;
+ dev->queue_slots = 0;
+ dev->queuelist = NULL;
+ dev->irq = 0;
+ dev->context_flag = 0;
+ dev->interrupt_flag = 0;
+ dev->dma_flag = 0;
+ dev->last_context = 0;
+ dev->last_switch = 0;
+ dev->last_checked = 0;
+ init_timer(&dev->timer);
+ init_waitqueue_head(&dev->context_wait);
+
+ dev->ctx_start = 0;
+ dev->lck_start = 0;
+
+ dev->buf_rp = dev->buf;
+ dev->buf_wp = dev->buf;
+ dev->buf_end = dev->buf + DRM_BSZ;
+ dev->buf_async = NULL;
+ init_waitqueue_head(&dev->buf_readers);
+ init_waitqueue_head(&dev->buf_writers);
+
+ radeon_res_ctx.handle = -1;
+
+ DRM_DEBUG("\n");
+
+ /* The kernel's context could be created here, but is now created
+ in drm_dma_enqueue. This is more resource-efficient for
+ hardware that does not do DMA, but may mean that
+ drm_select_queue fails between the time the interrupt is
+ initialized and the time the queues are initialized. */
+
+ return 0;
+}
+
+
+static int radeon_takedown(drm_device_t *dev)
+{
+ int i;
+ drm_magic_entry_t *pt, *next;
+ drm_map_t *map;
+ drm_vma_entry_t *vma, *vma_next;
+
+ DRM_DEBUG("\n");
+
+ down(&dev->struct_sem);
+ del_timer(&dev->timer);
+
+ if (dev->devname) {
+ drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
+ dev->devname = NULL;
+ }
+
+ if (dev->unique) {
+ drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
+ dev->unique = NULL;
+ dev->unique_len = 0;
+ }
+ /* Clear pid list */
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ for (pt = dev->magiclist[i].head; pt; pt = next) {
+ next = pt->next;
+ drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
+ }
+ dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
+ }
+
+#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
+ /* Clear AGP information */
+ if (dev->agp) {
+ drm_agp_mem_t *entry;
+ drm_agp_mem_t *nexte;
+
+ /* Remove AGP resources, but leave dev->agp
+ intact until radeon_cleanup is called. */
+ for (entry = dev->agp->memory; entry; entry = nexte) {
+ nexte = entry->next;
+ if (entry->bound) drm_unbind_agp(entry->memory);
+ drm_free_agp(entry->memory, entry->pages);
+ drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+ }
+ dev->agp->memory = NULL;
+
+ if (dev->agp->acquired) _drm_agp_release();
+
+ dev->agp->acquired = 0;
+ dev->agp->enabled = 0;
+ }
+#endif
+
+ /* Clear vma list (only built for debugging) */
+ if (dev->vmalist) {
+ for (vma = dev->vmalist; vma; vma = vma_next) {
+ vma_next = vma->next;
+ drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
+ }
+ dev->vmalist = NULL;
+ }
+
+ /* Clear map area and mtrr information */
+ if (dev->maplist) {
+ for (i = 0; i < dev->map_count; i++) {
+ map = dev->maplist[i];
+ switch (map->type) {
+ case _DRM_REGISTERS:
+ case _DRM_FRAME_BUFFER:
+#ifdef CONFIG_MTRR
+ if (map->mtrr >= 0) {
+ int retcode;
+ retcode = mtrr_del(map->mtrr,
+ map->offset,
+ map->size);
+ DRM_DEBUG("mtrr_del = %d\n", retcode);
+ }
+#endif
+ drm_ioremapfree(map->handle, map->size);
+ break;
+ case _DRM_SHM:
+ drm_free_pages((unsigned long)map->handle,
+ drm_order(map->size)
+ - PAGE_SHIFT,
+ DRM_MEM_SAREA);
+ break;
+ case _DRM_AGP:
+ /* Do nothing here, because this is all
+ handled in the AGP/GART driver. */
+ break;
+ }
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ }
+ drm_free(dev->maplist,
+ dev->map_count * sizeof(*dev->maplist),
+ DRM_MEM_MAPS);
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ }
+
+ drm_dma_takedown(dev);
+
+ dev->queue_count = 0;
+ if (dev->lock.hw_lock) {
+ dev->lock.hw_lock = NULL; /* SHM removed */
+ dev->lock.pid = 0;
+ wake_up_interruptible(&dev->lock.lock_queue);
+ }
+ up(&dev->struct_sem);
+
+ return 0;
+}
+
+/* radeon_init is called via init_module at module load time, or via
+ * linux/init/main.c (this is not currently supported). */
+
+static int __init radeon_init(void)
+{
+ int retcode;
+ drm_device_t *dev = &radeon_device;
+
+ DRM_DEBUG("\n");
+
+ memset((void *)dev, 0, sizeof(*dev));
+ dev->count_lock = SPIN_LOCK_UNLOCKED;
+ sema_init(&dev->struct_sem, 1);
+
+#ifdef MODULE
+ drm_parse_options(radeon);
+#endif
+
+ if ((retcode = misc_register(&radeon_misc))) {
+ DRM_ERROR("Cannot register \"%s\"\n", RADEON_NAME);
+ return retcode;
+ }
+ dev->device = MKDEV(MISC_MAJOR, radeon_misc.minor);
+ dev->name = RADEON_NAME;
+
+ drm_mem_init();
+ drm_proc_init(dev);
+
+#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
+ dev->agp = drm_agp_init();
+ if (dev->agp == NULL) {
+ DRM_ERROR("Cannot initialize agpgart module.\n");
+ drm_proc_cleanup();
+ misc_deregister(&radeon_misc);
+ radeon_takedown(dev);
+ return -ENOMEM;
+ }
+
+#ifdef CONFIG_MTRR
+ dev->agp->agp_mtrr = mtrr_add(dev->agp->agp_info.aper_base,
+ dev->agp->agp_info.aper_size*1024*1024,
+ MTRR_TYPE_WRCOMB,
+ 1);
+#endif
+#endif
+
+ if((retcode = drm_ctxbitmap_init(dev))) {
+ DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+ drm_proc_cleanup();
+ misc_deregister(&radeon_misc);
+ radeon_takedown(dev);
+ return retcode;
+ }
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ RADEON_NAME,
+ RADEON_MAJOR,
+ RADEON_MINOR,
+ RADEON_PATCHLEVEL,
+ RADEON_DATE,
+ radeon_misc.minor);
+
+ return 0;
+}
+
+/* radeon_cleanup is called via cleanup_module at module unload time. */
+
+static void __exit radeon_cleanup(void)
+{
+ drm_device_t *dev = &radeon_device;
+
+ DRM_DEBUG("\n");
+
+ drm_proc_cleanup();
+ if (misc_deregister(&radeon_misc)) {
+ DRM_ERROR("Cannot unload module\n");
+ } else {
+ DRM_INFO("Module unloaded\n");
+ }
+ drm_ctxbitmap_cleanup(dev);
+ radeon_takedown(dev);
+#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
+ if (dev->agp) {
+ drm_agp_uninit();
+ drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
+ dev->agp = NULL;
+ }
+#endif
+}
+
+module_init(radeon_init);
+module_exit(radeon_cleanup);
+
+
+int radeon_version(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_version_t version;
+ int len;
+
+ if (copy_from_user(&version,
+ (drm_version_t *)arg,
+ sizeof(version)))
+ return -EFAULT;
+
+#define DRM_COPY(name,value) \
+ len = strlen(value); \
+ if (len > name##_len) len = name##_len; \
+ name##_len = strlen(value); \
+ if (len && name) { \
+ if (copy_to_user(name, value, len)) \
+ return -EFAULT; \
+ }
+
+ version.version_major = RADEON_MAJOR;
+ version.version_minor = RADEON_MINOR;
+ version.version_patchlevel = RADEON_PATCHLEVEL;
+
+ DRM_COPY(version.name, RADEON_NAME);
+ DRM_COPY(version.date, RADEON_DATE);
+ DRM_COPY(version.desc, RADEON_DESC);
+
+ if (copy_to_user((drm_version_t *)arg,
+ &version,
+ sizeof(version)))
+ return -EFAULT;
+ return 0;
+}
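The DRM_COPY() macro above reports the full string length back to the caller even when it truncates the copy, which supports the usual two-pass version query. A hedged user-space sketch of that pattern follows (not from this commit; buffer handling is illustrative and error cleanup is trimmed):

/* Sketch: query the driver version in two passes, as radeon_version()
 * and DRM_COPY() above are written to support. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"		/* drm_version_t, DRM_IOCTL_VERSION */

int print_drm_version(int fd)
{
	drm_version_t v;

	memset(&v, 0, sizeof(v));
	if (ioctl(fd, DRM_IOCTL_VERSION, &v) < 0)	/* pass 1: lengths only */
		return -1;

	v.name = malloc(v.name_len + 1);
	v.date = malloc(v.date_len + 1);
	v.desc = malloc(v.desc_len + 1);
	if (!v.name || !v.date || !v.desc)
		return -1;

	if (ioctl(fd, DRM_IOCTL_VERSION, &v) < 0)	/* pass 2: fetch strings */
		return -1;
	v.name[v.name_len] = v.date[v.date_len] = v.desc[v.desc_len] = '\0';

	printf("%s %d.%d.%d (%s): %s\n", v.name,
	       v.version_major, v.version_minor, v.version_patchlevel,
	       v.date, v.desc);
	return 0;
}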
+
+int radeon_open(struct inode *inode, struct file *filp)
+{
+ drm_device_t *dev = &radeon_device;
+ int retcode = 0;
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+ if (!(retcode = drm_open_helper(inode, filp, dev))) {
+#if LINUX_VERSION_CODE < 0x020333
+ MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
+#endif
+ atomic_inc(&dev->total_open);
+ spin_lock(&dev->count_lock);
+ if (!dev->open_count++) {
+ spin_unlock(&dev->count_lock);
+ return radeon_setup(dev);
+ }
+ spin_unlock(&dev->count_lock);
+ }
+
+ return retcode;
+}
+
+int radeon_release(struct inode *inode, struct file *filp)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev;
+ int retcode = 0;
+
+ lock_kernel();
+ dev = priv->dev;
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+
+ /* Force the cleanup of page flipping when required */
+ if ( dev->dev_private ) {
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ if ( dev_priv->page_flipping ) {
+ radeon_do_cleanup_pageflip( dev );
+ }
+ }
+
+ if (!(retcode = drm_release(inode, filp))) {
+#if LINUX_VERSION_CODE < 0x020333
+ MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
+#endif
+ atomic_inc(&dev->total_close);
+ spin_lock(&dev->count_lock);
+ if (!--dev->open_count) {
+ if (atomic_read(&dev->ioctl_count) || dev->blocked) {
+ DRM_ERROR("Device busy: %d %d\n",
+ atomic_read(&dev->ioctl_count),
+ dev->blocked);
+ spin_unlock(&dev->count_lock);
+ unlock_kernel();
+ return -EBUSY;
+ }
+ spin_unlock(&dev->count_lock);
+ unlock_kernel();
+ return radeon_takedown(dev);
+ }
+ spin_unlock(&dev->count_lock);
+ }
+
+ unlock_kernel();
+ return retcode;
+}
+
+/* radeon_ioctl is called whenever a process performs an ioctl on the device. */
+
+int radeon_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int nr = DRM_IOCTL_NR(cmd);
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ int retcode = 0;
+ drm_ioctl_desc_t *ioctl;
+ drm_ioctl_t *func;
+
+ atomic_inc(&dev->ioctl_count);
+ atomic_inc(&dev->total_ioctl);
+ ++priv->ioctl_count;
+
+ DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
+ current->pid, cmd, nr, dev->device, priv->authenticated);
+
+ if (nr >= RADEON_IOCTL_COUNT) {
+ retcode = -EINVAL;
+ } else {
+ ioctl = &radeon_ioctls[nr];
+ func = ioctl->func;
+
+ if (!func) {
+ DRM_DEBUG("no function\n");
+ retcode = -EINVAL;
+ } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
+ || (ioctl->auth_needed && !priv->authenticated)) {
+ retcode = -EACCES;
+ } else {
+ retcode = (func)(inode, filp, cmd, arg);
+ }
+ }
+
+ atomic_dec(&dev->ioctl_count);
+ return retcode;
+}
+
+int radeon_lock(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ DECLARE_WAITQUEUE(entry, current);
+ int ret = 0;
+ drm_lock_t lock;
+#if DRM_DMA_HISTOGRAM
+ cycles_t start;
+
+ dev->lck_start = start = get_cycles();
+#endif
+
+ if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
+ return -EFAULT;
+
+ if (lock.context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ current->pid, lock.context);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
+ lock.context, current->pid, dev->lock.hw_lock->lock,
+ lock.flags);
+
+ if (lock.context < 0 /* || lock.context >= dev->queue_count */)
+ return -EINVAL;
+
+ if (!ret) {
+ add_wait_queue(&dev->lock.lock_queue, &entry);
+ for (;;) {
+ current->state = TASK_INTERRUPTIBLE;
+ if (!dev->lock.hw_lock) {
+ /* Device has been unregistered */
+ ret = -EINTR;
+ break;
+ }
+ if (drm_lock_take(&dev->lock.hw_lock->lock,
+ lock.context)) {
+ dev->lock.pid = current->pid;
+ dev->lock.lock_time = jiffies;
+ atomic_inc(&dev->total_locks);
+ break; /* Got lock */
+ }
+
+ /* Contention */
+ atomic_inc(&dev->total_sleeps);
+ schedule();
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&dev->lock.lock_queue, &entry);
+ }
+
+ if (!ret) {
+ sigemptyset(&dev->sigmask);
+ sigaddset(&dev->sigmask, SIGSTOP);
+ sigaddset(&dev->sigmask, SIGTSTP);
+ sigaddset(&dev->sigmask, SIGTTIN);
+ sigaddset(&dev->sigmask, SIGTTOU);
+ dev->sigdata.context = lock.context;
+ dev->sigdata.lock = dev->lock.hw_lock;
+ block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
+ if (lock.flags & _DRM_LOCK_READY) {
+ /* Wait for space in DMA/FIFO */
+ }
+ if (lock.flags & _DRM_LOCK_QUIESCENT) {
+ /* Make hardware quiescent */
+ DRM_DEBUG("not quiescent!\n");
+#if 0
+ radeon_quiescent(dev);
+#endif
+ }
+ }
+
+#if LINUX_VERSION_CODE < 0x020400
+ if (lock.context != radeon_res_ctx.handle) {
+ current->counter = 5;
+ current->priority = DEF_PRIORITY/4;
+ }
+#endif
+ DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
+
+#if DRM_DMA_HISTOGRAM
+ atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]);
+#endif
+
+ return ret;
+}
+
+
+int radeon_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->dev;
+ drm_lock_t lock;
+
+ if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
+ return -EFAULT;
+
+ if (lock.context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ current->pid, lock.context);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("%d frees lock (%d holds)\n",
+ lock.context,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+ atomic_inc(&dev->total_unlocks);
+ if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
+ atomic_inc(&dev->total_contends);
+ drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
+ /* FIXME: Try to send data to card here */
+ if (!dev->context_flag) {
+ if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ DRM_ERROR("\n");
+ }
+ }
+
+#if LINUX_VERSION_CODE < 0x020400
+ if (lock.context != radeon_res_ctx.handle) {
+ current->counter = 5;
+ current->priority = DEF_PRIORITY;
+ }
+#endif
+ unblock_all_signals();
+ return 0;
+}
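radeon_lock()/radeon_unlock() above implement the heavyweight lock path; real clients normally use the SAREA fast path in libdrm and only fall back to these ioctls on contention. A hedged sketch of the raw protocol (not part of this commit; `fd` is an open, authenticated DRM file descriptor and `ctx` a context obtained via DRM_IOCTL_ADD_CTX):

/* Sketch: take and release the hardware lock through the ioctls handled
 * by radeon_lock()/radeon_unlock() above. */
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"		/* drm_lock_t, DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK */

int with_hw_lock(int fd, int ctx)
{
	drm_lock_t lock;

	memset(&lock, 0, sizeof(lock));
	lock.context = ctx;	/* must not be DRM_KERNEL_CONTEXT */
	lock.flags   = 0;	/* optionally _DRM_LOCK_READY / _DRM_LOCK_QUIESCENT */

	if (ioctl(fd, DRM_IOCTL_LOCK, &lock) < 0)
		return -1;	/* radeon_lock() slept and was interrupted */

	/* ... work that requires exclusive hardware access ... */

	return ioctl(fd, DRM_IOCTL_UNLOCK, &lock);
}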
diff --git a/linux/radeon_drv.h b/linux/radeon_drv.h
index f176bb560..06b541991 100644
--- a/linux/radeon_drv.h
+++ b/linux/radeon_drv.h
@@ -24,8 +24,10 @@
* DEALINGS IN THE SOFTWARE.
*
* Authors:
- * Kevin E. Martin <martin@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
+ * Rickard E. (Rik) Faith <faith@valinux.com>
+ * Kevin E. Martin <martin@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
*/
#ifndef __RADEON_DRV_H__
@@ -48,8 +50,6 @@ typedef struct drm_radeon_ring_buffer {
u32 tail;
u32 tail_mask;
int space;
-
- int high_mark;
} drm_radeon_ring_buffer_t;
typedef struct drm_radeon_depth_clear_t {
@@ -91,13 +91,13 @@ typedef struct drm_radeon_private {
u32 crtc_offset;
u32 crtc_offset_cntl;
- u32 color_fmt;
+ unsigned int color_fmt;
unsigned int front_offset;
unsigned int front_pitch;
unsigned int back_offset;
unsigned int back_pitch;
- u32 depth_fmt;
+ unsigned int depth_fmt;
unsigned int depth_offset;
unsigned int depth_pitch;
@@ -124,6 +124,18 @@ typedef struct drm_radeon_buf_priv {
drm_radeon_freelist_t *list_entry;
} drm_radeon_buf_priv_t;
+ /* radeon_drv.c */
+extern int radeon_version( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg );
+extern int radeon_open( struct inode *inode, struct file *filp );
+extern int radeon_release( struct inode *inode, struct file *filp );
+extern int radeon_ioctl( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg );
+extern int radeon_lock( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg );
+extern int radeon_unlock( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg );
+
/* radeon_cp.c */
extern int radeon_cp_init( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
@@ -146,17 +158,9 @@ extern void radeon_freelist_reset( drm_device_t *dev );
extern drm_buf_t *radeon_freelist_get( drm_device_t *dev );
extern int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n );
-
-static inline void
-radeon_update_ring_snapshot( drm_radeon_ring_buffer_t *ring )
-{
- ring->space = (*(volatile int *)ring->head - ring->tail) * sizeof(u32);
- if ( ring->space <= 0 )
- ring->space += ring->size;
-}
+extern void radeon_update_ring_snapshot( drm_radeon_private_t *dev_priv );
extern int radeon_do_cp_idle( drm_radeon_private_t *dev_priv );
-extern int radeon_do_cleanup_cp( drm_device_t *dev );
extern int radeon_do_cleanup_pageflip( drm_device_t *dev );
/* radeon_state.c */
@@ -168,13 +172,38 @@ extern int radeon_cp_vertex( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int radeon_cp_indices( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
-extern int radeon_cp_texture( struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg );
+extern int radeon_cp_blit( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg );
extern int radeon_cp_stipple( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int radeon_cp_indirect( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
+ /* radeon_bufs.c */
+extern int radeon_addbufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int radeon_mapbufs(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+ /* radeon_context.c */
+extern int radeon_resctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int radeon_addctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int radeon_modctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int radeon_getctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int radeon_switchctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int radeon_newctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern int radeon_rmctx(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
+extern int radeon_context_switch(drm_device_t *dev, int old, int new);
+extern int radeon_context_switch_complete(drm_device_t *dev, int new);
+
/* Register definitions, register access macros and drmAddMap constants
* for Radeon kernel driver.
@@ -485,14 +514,14 @@ extern int radeon_cp_indirect( struct inode *inode, struct file *filp,
#define RADEON_COLOR_FORMAT_RGB8 9
#define RADEON_COLOR_FORMAT_ARGB4444 15
-#define RADEON_TXFORMAT_I8 0
-#define RADEON_TXFORMAT_AI88 1
-#define RADEON_TXFORMAT_RGB332 2
-#define RADEON_TXFORMAT_ARGB1555 3
-#define RADEON_TXFORMAT_RGB565 4
-#define RADEON_TXFORMAT_ARGB4444 5
-#define RADEON_TXFORMAT_ARGB8888 6
-#define RADEON_TXFORMAT_RGBA8888 7
+#define RADEON_TXF_8BPP_I 0
+#define RADEON_TXF_16BPP_AI88 1
+#define RADEON_TXF_8BPP_RGB332 2
+#define RADEON_TXF_16BPP_ARGB1555 3
+#define RADEON_TXF_16BPP_RGB565 4
+#define RADEON_TXF_16BPP_ARGB4444 5
+#define RADEON_TXF_32BPP_ARGB8888 6
+#define RADEON_TXF_32BPP_RGBA8888 7
/* Constants */
#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -505,27 +534,27 @@ extern int radeon_cp_indirect( struct inode *inode, struct file *filp,
#define RADEON_MAX_VB_AGE 0x7fffffff
#define RADEON_MAX_VB_VERTS (0xffff)
-#define RADEON_RING_HIGH_MARK 128
-
#define RADEON_BASE(reg) ((u32)(dev_priv->mmio->handle))
-#define RADEON_ADDR(reg) (RADEON_BASE( reg ) + reg)
+#define RADEON_ADDR(reg) (RADEON_BASE(reg) + reg)
-#define RADEON_DEREF(reg) *(volatile u32 *)RADEON_ADDR( reg )
-#define RADEON_READ(reg) RADEON_DEREF( reg )
-#define RADEON_WRITE(reg, val) do { RADEON_DEREF( reg ) = val; } while (0)
+#define RADEON_DEREF(reg) *(__volatile__ u32 *)RADEON_ADDR(reg)
+#define RADEON_READ(reg) RADEON_DEREF(reg)
+#define RADEON_WRITE(reg,val) do { RADEON_DEREF(reg) = val; } while (0)
-#define RADEON_DEREF8(reg) *(volatile u8 *)RADEON_ADDR( reg )
-#define RADEON_READ8(reg) RADEON_DEREF8( reg )
-#define RADEON_WRITE8(reg, val) do { RADEON_DEREF8( reg ) = val; } while (0)
+#define RADEON_DEREF8(reg) *(__volatile__ u8 *)RADEON_ADDR(reg)
+#define RADEON_READ8(reg) RADEON_DEREF8(reg)
+#define RADEON_WRITE8(reg,val) do { RADEON_DEREF8(reg) = val; } while (0)
-#define RADEON_WRITE_PLL( addr, val ) do { \
- RADEON_WRITE8( RADEON_CLOCK_CNTL_INDEX, \
- ((addr) & 0x1f) | RADEON_PLL_WR_EN ); \
- RADEON_WRITE( RADEON_CLOCK_CNTL_DATA, (val) ); \
+#define RADEON_WRITE_PLL(addr,val) \
+do { \
+ RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, \
+ ((addr) & 0x1f) | RADEON_PLL_WR_EN); \
+ RADEON_WRITE(RADEON_CLOCK_CNTL_DATA, (val)); \
} while (0)
-extern int RADEON_READ_PLL( drm_device_t *dev, int addr );
+extern int RADEON_READ_PLL(drm_device_t *dev, int addr);
+
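RADEON_WRITE_PLL() above uses the indirect CLOCK_CNTL_INDEX/CLOCK_CNTL_DATA pair. The matching read half, RADEON_READ_PLL(), lives in radeon_cp.c; a sketch of what it presumably does is shown here, assuming `dev_priv` (used inside RADEON_BASE()) is in scope:

/* Illustration only: read half of the indirect PLL register access. */
static inline u32 example_read_pll(drm_radeon_private_t *dev_priv, int addr)
{
	RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f);
	return RADEON_READ(RADEON_CLOCK_CNTL_DATA);
}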
#define CP_PACKET0( reg, n ) \
@@ -544,46 +573,54 @@ extern int RADEON_READ_PLL( drm_device_t *dev, int addr );
* Engine control helper macros
*/
-#define RADEON_WAIT_UNTIL_2D_IDLE() do { \
+#define RADEON_WAIT_UNTIL_2D_IDLE() \
+do { \
OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \
OUT_RING( (RADEON_WAIT_2D_IDLECLEAN | \
RADEON_WAIT_HOST_IDLECLEAN) ); \
} while (0)
-#define RADEON_WAIT_UNTIL_3D_IDLE() do { \
+#define RADEON_WAIT_UNTIL_3D_IDLE() \
+do { \
OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \
OUT_RING( (RADEON_WAIT_3D_IDLECLEAN | \
RADEON_WAIT_HOST_IDLECLEAN) ); \
} while (0)
-#define RADEON_WAIT_UNTIL_IDLE() do { \
+#define RADEON_WAIT_UNTIL_IDLE() \
+do { \
OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \
OUT_RING( (RADEON_WAIT_2D_IDLECLEAN | \
RADEON_WAIT_3D_IDLECLEAN | \
RADEON_WAIT_HOST_IDLECLEAN) ); \
} while (0)
-#define RADEON_WAIT_UNTIL_PAGE_FLIPPED() do { \
+#define RADEON_WAIT_UNTIL_PAGE_FLIPPED() \
+do { \
OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) ); \
OUT_RING( RADEON_WAIT_CRTC_PFLIP ); \
} while (0)
-#define RADEON_FLUSH_CACHE() do { \
+#define RADEON_FLUSH_CACHE() \
+do { \
OUT_RING( CP_PACKET0( RADEON_RB2D_DSTCACHE_CTLSTAT, 0 ) ); \
OUT_RING( RADEON_RB2D_DC_FLUSH ); \
} while (0)
-#define RADEON_PURGE_CACHE() do { \
+#define RADEON_PURGE_CACHE() \
+do { \
OUT_RING( CP_PACKET0( RADEON_RB2D_DSTCACHE_CTLSTAT, 0 ) ); \
OUT_RING( RADEON_RB2D_DC_FLUSH_ALL ); \
} while (0)
-#define RADEON_FLUSH_ZCACHE() do { \
+#define RADEON_FLUSH_ZCACHE() \
+do { \
OUT_RING( CP_PACKET0( RADEON_RB3D_ZCACHE_CTLSTAT, 0 ) ); \
OUT_RING( RADEON_RB3D_ZC_FLUSH ); \
} while (0)
-#define RADEON_PURGE_ZCACHE() do { \
+#define RADEON_PURGE_ZCACHE() \
+do { \
OUT_RING( CP_PACKET0( RADEON_RB3D_ZCACHE_CTLSTAT, 0 ) ); \
OUT_RING( RADEON_RB3D_ZC_FLUSH_ALL ); \
} while (0)
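For reference, a small illustration (not part of this header) of how the wait/flush helpers above compose into a command-ring sequence. BEGIN_RING()/OUT_RING()/ADVANCE_RING() and RING_LOCALS are defined further down in this file; each flush macro emits two dwords, hence BEGIN_RING( 4 ) here.

/* Sketch: flush the 2D destination cache and the depth (Z) cache. */
static inline void example_flush_caches( drm_radeon_private_t *dev_priv )
{
	RING_LOCALS;

	BEGIN_RING( 4 );
	RADEON_FLUSH_CACHE();		/* 2 dwords: RB2D destination cache */
	RADEON_FLUSH_ZCACHE();		/* 2 dwords: RB3D depth cache */
	ADVANCE_RING();
}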
@@ -593,33 +630,7 @@ extern int RADEON_READ_PLL( drm_device_t *dev, int addr );
* Misc helper macros
*/
-#define LOCK_TEST_WITH_RETURN( dev ) \
-do { \
- if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \
- dev->lock.pid != current->pid ) { \
- DRM_ERROR( "%s called without lock held\n", \
- __FUNCTION__ ); \
- return -EINVAL; \
- } \
-} while (0)
-
-#define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \
-do { \
- drm_radeon_ring_buffer_t *ring = &dev_priv->ring; int i; \
- if ( ring->space < ring->high_mark ) { \
- for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { \
- radeon_update_ring_snapshot( ring ); \
- if ( ring->space >= ring->high_mark ) \
- goto __ring_space_done; \
- udelay( 1 ); \
- } \
- DRM_ERROR( "ring space check failed!\n" ); \
- return -EBUSY; \
- } \
- __ring_space_done: \
-} while (0)
-
-#define VB_AGE_TEST_WITH_RETURN( dev_priv ) \
+#define VB_AGE_CHECK_WITH_RET( dev_priv ) \
do { \
drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; \
if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) { \
@@ -630,17 +641,20 @@ do { \
} \
} while (0)
-#define RADEON_DISPATCH_AGE( age ) do { \
+#define RADEON_DISPATCH_AGE( age ) \
+do { \
OUT_RING( CP_PACKET0( RADEON_LAST_DISPATCH_REG, 0 ) ); \
OUT_RING( age ); \
} while (0)
-#define RADEON_FRAME_AGE( age ) do { \
+#define RADEON_FRAME_AGE( age ) \
+do { \
OUT_RING( CP_PACKET0( RADEON_LAST_FRAME_REG, 0 ) ); \
OUT_RING( age ); \
} while (0)
-#define RADEON_CLEAR_AGE( age ) do { \
+#define RADEON_CLEAR_AGE( age ) \
+do { \
OUT_RING( CP_PACKET0( RADEON_LAST_CLEAR_REG, 0 ) ); \
OUT_RING( age ); \
} while (0)
@@ -662,7 +676,7 @@ do { \
DRM_INFO( "BEGIN_RING( %d ) in %s\n", \
n, __FUNCTION__ ); \
} \
- if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \
+ if ( dev_priv->ring.space < (n) * sizeof(u32) ) { \
radeon_wait_ring( dev_priv, (n) * sizeof(u32) ); \
} \
dev_priv->ring.space -= (n) * sizeof(u32); \
diff --git a/linux/radeon_state.c b/linux/radeon_state.c
index 9360c43b0..7bfefb2ca 100644
--- a/linux/radeon_state.c
+++ b/linux/radeon_state.c
@@ -23,12 +23,12 @@
* DEALINGS IN THE SOFTWARE.
*
* Authors:
- * Gareth Hughes <gareth@valinux.com>
* Kevin E. Martin <martin@valinux.com>
+ * Gareth Hughes <gareth@valinux.com>
+ *
*/
#define __NO_VERSION__
-#include "radeon.h"
#include "drmP.h"
#include "radeon_drv.h"
#include "drm.h"
@@ -486,8 +486,7 @@ static void radeon_print_dirty( const char *msg, unsigned int flags )
}
static void radeon_cp_dispatch_clear( drm_device_t *dev,
- drm_radeon_clear_t *clear,
- drm_radeon_clear_rect_t *depth_boxes )
+ drm_radeon_clear_t *clear )
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -498,6 +497,8 @@ static void radeon_cp_dispatch_clear( drm_device_t *dev,
RING_LOCALS;
DRM_DEBUG( "%s\n", __FUNCTION__ );
+ radeon_update_ring_snapshot( dev_priv );
+
if ( dev_priv->page_flipping && dev_priv->current_page == 1 ) {
unsigned int tmp = flags;
@@ -524,7 +525,7 @@ static void radeon_cp_dispatch_clear( drm_device_t *dev,
RADEON_WAIT_UNTIL_3D_IDLE();
OUT_RING( CP_PACKET0( RADEON_DP_WRITE_MASK, 0 ) );
- OUT_RING( clear->color_mask );
+ OUT_RING( sarea_priv->context_state.rb3d_planemask );
ADVANCE_RING();
@@ -608,17 +609,17 @@ static void radeon_cp_dispatch_clear( drm_device_t *dev,
RADEON_VTX_FMT_RADEON_MODE |
(3 << RADEON_NUM_VERTICES_SHIFT)) );
- OUT_RING( depth_boxes[i].ui[CLEAR_X1] );
- OUT_RING( depth_boxes[i].ui[CLEAR_Y1] );
- OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
+ OUT_RING( clear->rect.ui[CLEAR_X1] );
+ OUT_RING( clear->rect.ui[CLEAR_Y1] );
+ OUT_RING( clear->rect.ui[CLEAR_DEPTH] );
- OUT_RING( depth_boxes[i].ui[CLEAR_X1] );
- OUT_RING( depth_boxes[i].ui[CLEAR_Y2] );
- OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
+ OUT_RING( clear->rect.ui[CLEAR_X1] );
+ OUT_RING( clear->rect.ui[CLEAR_Y2] );
+ OUT_RING( clear->rect.ui[CLEAR_DEPTH] );
- OUT_RING( depth_boxes[i].ui[CLEAR_X2] );
- OUT_RING( depth_boxes[i].ui[CLEAR_Y2] );
- OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
+ OUT_RING( clear->rect.ui[CLEAR_X2] );
+ OUT_RING( clear->rect.ui[CLEAR_Y2] );
+ OUT_RING( clear->rect.ui[CLEAR_DEPTH] );
ADVANCE_RING();
@@ -654,6 +655,8 @@ static void radeon_cp_dispatch_swap( drm_device_t *dev )
RING_LOCALS;
DRM_DEBUG( "%s\n", __FUNCTION__ );
+ radeon_update_ring_snapshot( dev_priv );
+
#if RADEON_PERFORMANCE_BOXES
/* Do some trivial performance monitoring...
*/
@@ -721,6 +724,8 @@ static void radeon_cp_dispatch_flip( drm_device_t *dev )
RING_LOCALS;
DRM_DEBUG( "%s: page=%d\n", __FUNCTION__, dev_priv->current_page );
+ radeon_update_ring_snapshot( dev_priv );
+
#if RADEON_PERFORMANCE_BOXES
/* Do some trivial performance monitoring...
*/
@@ -771,7 +776,9 @@ static void radeon_cp_dispatch_vertex( drm_device_t *dev,
RING_LOCALS;
DRM_DEBUG( "%s: nbox=%d\n", __FUNCTION__, sarea_priv->nbox );
- if ( 0 )
+ radeon_update_ring_snapshot( dev_priv );
+
+ if ( 1 )
radeon_print_dirty( "dispatch_vertex", sarea_priv->dirty );
if ( buf->used ) {
@@ -837,6 +844,8 @@ static void radeon_cp_dispatch_indirect( drm_device_t *dev,
DRM_DEBUG( "indirect: buf=%d s=0x%x e=0x%x\n",
buf->idx, start, end );
+ radeon_update_ring_snapshot( dev_priv );
+
if ( start != end ) {
int offset = (dev_priv->agp_buffers_offset
+ buf->offset + start);
@@ -899,6 +908,8 @@ static void radeon_cp_dispatch_indices( drm_device_t *dev,
RING_LOCALS;
DRM_DEBUG( "indices: s=%d e=%d c=%d\n", start, end, count );
+ radeon_update_ring_snapshot( dev_priv );
+
if ( 0 )
radeon_print_dirty( "dispatch_indices", sarea_priv->dirty );
@@ -960,67 +971,50 @@ static void radeon_cp_dispatch_indices( drm_device_t *dev,
sarea_priv->nbox = 0;
}
-#define RADEON_MAX_TEXTURE_SIZE (RADEON_BUFFER_SIZE - 8 * sizeof(u32))
-
-static int radeon_cp_dispatch_texture( drm_device_t *dev,
- drm_radeon_texture_t *tex,
- drm_radeon_tex_image_t *image )
+static int radeon_cp_dispatch_blit( drm_device_t *dev,
+ drm_radeon_blit_t *blit )
{
drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf;
drm_radeon_buf_priv_t *buf_priv;
u32 format;
- u32 *buffer;
- u8 *data;
- int size, dwords, tex_width, blit_width;
- u32 y, height;
- int ret = 0, i;
+ u32 *data;
+ int dword_shift, dwords;
RING_LOCALS;
+ DRM_DEBUG( "blit: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
+ blit->offset >> 10, blit->pitch, blit->format,
+ blit->x, blit->y, blit->width, blit->height );
- /* FIXME: Be smarter about this...
- */
- buf = radeon_freelist_get( dev );
- if ( !buf ) return -EAGAIN;
-
- DRM_DEBUG( "tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
- tex->offset >> 10, tex->pitch, tex->format,
- image->x, image->y, image->width, image->height );
-
- buf_priv = buf->dev_private;
+ radeon_update_ring_snapshot( dev_priv );
/* The compiler won't optimize away a division by a variable,
* even if the only legal values are powers of two. Thus, we'll
* use a shift instead.
*/
- switch ( tex->format ) {
- case RADEON_TXFORMAT_ARGB8888:
- case RADEON_TXFORMAT_RGBA8888:
+ switch ( blit->format ) {
+ case RADEON_TXF_32BPP_ARGB8888:
+ case RADEON_TXF_32BPP_RGBA8888:
format = RADEON_COLOR_FORMAT_ARGB8888;
- tex_width = tex->width * 4;
- blit_width = image->width * 4;
+ dword_shift = 0;
break;
- case RADEON_TXFORMAT_AI88:
- case RADEON_TXFORMAT_ARGB1555:
- case RADEON_TXFORMAT_RGB565:
- case RADEON_TXFORMAT_ARGB4444:
+ case RADEON_TXF_16BPP_AI88:
+ case RADEON_TXF_16BPP_ARGB1555:
+ case RADEON_TXF_16BPP_RGB565:
+ case RADEON_TXF_16BPP_ARGB4444:
format = RADEON_COLOR_FORMAT_RGB565;
- tex_width = tex->width * 2;
- blit_width = image->width * 2;
+ dword_shift = 1;
break;
- case RADEON_TXFORMAT_I8:
- case RADEON_TXFORMAT_RGB332:
+ case RADEON_TXF_8BPP_I:
+ case RADEON_TXF_8BPP_RGB332:
format = RADEON_COLOR_FORMAT_CI8;
- tex_width = tex->width * 1;
- blit_width = image->width * 1;
+ dword_shift = 2;
break;
default:
- DRM_ERROR( "invalid texture format %d\n", tex->format );
+ DRM_ERROR( "invalid blit format %d\n", blit->format );
return -EINVAL;
}
- DRM_DEBUG( " tex=%dx%d blit=%d\n",
- tex_width, tex->height, blit_width );
-
/* Flush the pixel cache. This ensures no pixel data gets mixed
* up with the texture data from the host data blit, otherwise
* part of the texture image may be corrupted.
@@ -1032,88 +1026,46 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
ADVANCE_RING();
- /* Make a copy of the parameters in case we have to update them
- * for a multi-pass texture blit.
+ /* Dispatch the indirect buffer.
*/
- y = image->y;
- height = image->height;
- data = (u8 *)image->data;
+ buf = dma->buflist[blit->idx];
+ buf_priv = buf->dev_private;
- size = height * blit_width;
+ if ( buf->pid != current->pid ) {
+ DRM_ERROR( "process %d using buffer owned by %d\n",
+ current->pid, buf->pid );
+ return -EINVAL;
+ }
+ if ( buf->pending ) {
+ DRM_ERROR( "sending pending buffer %d\n", blit->idx );
+ return -EINVAL;
+ }
- if ( size > RADEON_MAX_TEXTURE_SIZE ) {
- /* Texture image is too large, do a multipass upload */
- ret = -EAGAIN;
+ buf_priv->discard = 1;
- /* Adjust the blit size to fit the indirect buffer */
- height = RADEON_MAX_TEXTURE_SIZE / blit_width;
- size = height * blit_width;
+ dwords = (blit->width * blit->height) >> dword_shift;
+ if ( !dwords ) dwords = 1;
- /* Update the input parameters for next time */
- image->y += height;
- image->height -= height;
- image->data = (char *)image->data + size;
+ data = (u32 *)((char *)dev_priv->buffers->handle + buf->offset);
- if ( copy_to_user( tex->image, image, sizeof(*image) ) ) {
- DRM_ERROR( "EFAULT on tex->image\n" );
- return -EFAULT;
- }
- } else if ( size < 4 ) {
- size = 4;
- }
+ data[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
+ data[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ RADEON_GMC_BRUSH_NONE |
+ (format << 8) |
+ RADEON_GMC_SRC_DATATYPE_COLOR |
+ RADEON_ROP3_S |
+ RADEON_DP_SRC_SOURCE_HOST_DATA |
+ RADEON_GMC_CLR_CMP_CNTL_DIS |
+ RADEON_GMC_WR_MSK_DIS);
- dwords = size / 4;
+ data[2] = (blit->pitch << 22) | (blit->offset >> 10);
+ data[3] = 0xffffffff;
+ data[4] = 0xffffffff;
+ data[5] = (blit->y << 16) | blit->x;
+ data[6] = (blit->height << 16) | blit->width;
+ data[7] = dwords;
- /* Dispatch the indirect buffer.
- */
- buffer = (u32 *)((char *)dev_priv->buffers->handle + buf->offset);
-
- buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
- buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
- RADEON_GMC_BRUSH_NONE |
- (format << 8) |
- RADEON_GMC_SRC_DATATYPE_COLOR |
- RADEON_ROP3_S |
- RADEON_DP_SRC_SOURCE_HOST_DATA |
- RADEON_GMC_CLR_CMP_CNTL_DIS |
- RADEON_GMC_WR_MSK_DIS);
-
- buffer[2] = (tex->pitch << 22) | (tex->offset >> 10);
- buffer[3] = 0xffffffff;
- buffer[4] = 0xffffffff;
- buffer[5] = (y << 16) | image->x;
- buffer[6] = (height << 16) | image->width;
- buffer[7] = dwords;
-
- buffer += 8;
-
- if ( tex_width >= 32 ) {
- /* Texture image width is larger than the minimum, so we
- * can upload it directly.
- */
- if ( copy_from_user( buffer, data, dwords * sizeof(u32) ) ) {
- DRM_ERROR( "EFAULT on data, %d dwords\n", dwords );
- return -EFAULT;
- }
- } else {
- /* Texture image width is less than the minimum, so we
- * need to pad out each image scanline to the minimum
- * width.
- */
- for ( i = 0 ; i < tex->height ; i++ ) {
- if ( copy_from_user( buffer, data, tex_width ) ) {
- DRM_ERROR( "EFAULT on pad, %d bytes\n",
- tex_width );
- return -EFAULT;
- }
- buffer += 8;
- data += tex_width;
- }
- }
-
- buf->pid = current->pid;
buf->used = (dwords + 8) * sizeof(u32);
- buf_priv->discard = 1;
radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
@@ -1128,7 +1080,7 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
ADVANCE_RING();
- return ret;
+ return 0;
}
static void radeon_cp_dispatch_stipple( drm_device_t *dev, u32 *stipple )
@@ -1138,6 +1090,8 @@ static void radeon_cp_dispatch_stipple( drm_device_t *dev, u32 *stipple )
RING_LOCALS;
DRM_DEBUG( "%s\n", __FUNCTION__ );
+ radeon_update_ring_snapshot( dev_priv );
+
BEGIN_RING( 35 );
OUT_RING( CP_PACKET0( RADEON_RE_STIPPLE_ADDR, 0 ) );
@@ -1164,25 +1118,22 @@ int radeon_cp_clear( struct inode *inode, struct file *filp,
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_radeon_clear_t clear;
- drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
DRM_DEBUG( "%s\n", __FUNCTION__ );
- LOCK_TEST_WITH_RETURN( dev );
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
- if ( copy_from_user( &clear, (drm_radeon_clear_t *)arg,
+ if ( copy_from_user( &clear, (drm_radeon_clear_t *) arg,
sizeof(clear) ) )
return -EFAULT;
- RING_SPACE_TEST_WITH_RETURN( dev_priv );
-
if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS )
sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
- if ( copy_from_user( &depth_boxes, clear.depth_boxes,
- sarea_priv->nbox * sizeof(depth_boxes[0]) ) )
- return -EFAULT;
-
- radeon_cp_dispatch_clear( dev, &clear, depth_boxes );
+ radeon_cp_dispatch_clear( dev, &clear );
return 0;
}
@@ -1196,9 +1147,11 @@ int radeon_cp_swap( struct inode *inode, struct file *filp,
drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
DRM_DEBUG( "%s\n", __FUNCTION__ );
- LOCK_TEST_WITH_RETURN( dev );
-
- RING_SPACE_TEST_WITH_RETURN( dev_priv );
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS )
sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
@@ -1225,8 +1178,11 @@ int radeon_cp_vertex( struct inode *inode, struct file *filp,
drm_radeon_buf_priv_t *buf_priv;
drm_radeon_vertex_t vertex;
- LOCK_TEST_WITH_RETURN( dev );
-
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
if ( !dev_priv || dev_priv->is_pci ) {
DRM_ERROR( "%s called with a PCI card\n", __FUNCTION__ );
return -EINVAL;
@@ -1251,8 +1207,7 @@ int radeon_cp_vertex( struct inode *inode, struct file *filp,
return -EINVAL;
}
- RING_SPACE_TEST_WITH_RETURN( dev_priv );
- VB_AGE_TEST_WITH_RETURN( dev_priv );
+ VB_AGE_CHECK_WITH_RET( dev_priv );
buf = dma->buflist[vertex.idx];
buf_priv = buf->dev_private;
@@ -1288,8 +1243,11 @@ int radeon_cp_indices( struct inode *inode, struct file *filp,
drm_radeon_indices_t elts;
int count;
- LOCK_TEST_WITH_RETURN( dev );
-
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
if ( !dev_priv || dev_priv->is_pci ) {
DRM_ERROR( "%s called with a PCI card\n", __FUNCTION__ );
return -EINVAL;
@@ -1314,8 +1272,7 @@ int radeon_cp_indices( struct inode *inode, struct file *filp,
return -EINVAL;
}
- RING_SPACE_TEST_WITH_RETURN( dev_priv );
- VB_AGE_TEST_WITH_RETURN( dev_priv );
+ VB_AGE_CHECK_WITH_RET( dev_priv );
buf = dma->buflist[elts.idx];
buf_priv = buf->dev_private;
@@ -1351,34 +1308,37 @@ int radeon_cp_indices( struct inode *inode, struct file *filp,
return 0;
}
-int radeon_cp_texture( struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg )
+int radeon_cp_blit( struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_texture_t tex;
- drm_radeon_tex_image_t image;
+ drm_device_dma_t *dma = dev->dma;
+ drm_radeon_blit_t blit;
- LOCK_TEST_WITH_RETURN( dev );
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
- if ( copy_from_user( &tex, (drm_radeon_texture_t *)arg, sizeof(tex) ) )
+ if ( copy_from_user( &blit, (drm_radeon_blit_t *)arg,
+ sizeof(blit) ) )
return -EFAULT;
- if ( tex.image == NULL ) {
- DRM_ERROR( "null texture image!\n" );
+ DRM_DEBUG( "%s: pid=%d index=%d\n",
+ __FUNCTION__, current->pid, blit.idx );
+
+	if ( blit.idx < 0 || blit.idx >= dma->buf_count ) {
+		DRM_ERROR( "buffer index %d (of %d max)\n",
+			   blit.idx, dma->buf_count - 1 );
return -EINVAL;
}
- if ( copy_from_user( &image,
- (drm_radeon_tex_image_t *)tex.image,
- sizeof(image) ) )
- return -EFAULT;
-
- RING_SPACE_TEST_WITH_RETURN( dev_priv );
- VB_AGE_TEST_WITH_RETURN( dev_priv );
+ VB_AGE_CHECK_WITH_RET( dev_priv );
- return radeon_cp_dispatch_texture( dev, &tex, &image );
+ return radeon_cp_dispatch_blit( dev, &blit );
}
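A hypothetical user-space sketch of driving the new blit path handled by radeon_cp_blit() above (not from this commit). It assumes `fd` holds the hardware lock, the CP is running, and `idx` names a DMA buffer obtained via DRM_IOCTL_MAP_BUFS and already filled with ARGB8888 pixel data. Field names follow the drm_radeon_blit_t usage above; the exact header layout, the visibility of the RADEON_TXF_* constants to user space, and the pitch encoding (which feeds DST_PITCH_OFFSET via blit->pitch << 22) are assumptions.

/* Sketch: upload a 64x64 ARGB8888 tile from DMA buffer `idx` to VRAM. */
#include <string.h>
#include <sys/ioctl.h>
#include "radeon_drm.h"		/* drm_radeon_blit_t, DRM_IOCTL_RADEON_BLIT */

int upload_texture_tile(int fd, int idx, unsigned int vram_offset, int hw_pitch)
{
	drm_radeon_blit_t blit;

	memset(&blit, 0, sizeof(blit));
	blit.idx    = idx;		/* DMA buffer holding the pixel data */
	blit.offset = vram_offset;	/* destination; 1KB aligned (>> 10 above) */
	blit.pitch  = hw_pitch;		/* in the units DST_PITCH_OFFSET expects */
	blit.format = RADEON_TXF_32BPP_ARGB8888;
	blit.x      = 0;
	blit.y      = 0;
	blit.width  = 64;		/* 64*64 dwords of data in the buffer */
	blit.height = 64;

	return ioctl(fd, DRM_IOCTL_RADEON_BLIT, &blit);
}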
int radeon_cp_stipple( struct inode *inode, struct file *filp,
@@ -1386,21 +1346,23 @@ int radeon_cp_stipple( struct inode *inode, struct file *filp,
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
- drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_stipple_t stipple;
u32 mask[32];
- LOCK_TEST_WITH_RETURN( dev );
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
if ( copy_from_user( &stipple, (drm_radeon_stipple_t *)arg,
sizeof(stipple) ) )
return -EFAULT;
- if ( copy_from_user( &mask, stipple.mask, 32 * sizeof(u32) ) )
+ if ( copy_from_user( &mask, stipple.mask,
+ 32 * sizeof(u32) ) )
return -EFAULT;
- RING_SPACE_TEST_WITH_RETURN( dev_priv );
-
radeon_cp_dispatch_stipple( dev, mask );
return 0;
@@ -1418,8 +1380,11 @@ int radeon_cp_indirect( struct inode *inode, struct file *filp,
drm_radeon_indirect_t indirect;
RING_LOCALS;
- LOCK_TEST_WITH_RETURN( dev );
-
+ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||
+ dev->lock.pid != current->pid ) {
+ DRM_ERROR( "%s called without lock held\n", __FUNCTION__ );
+ return -EINVAL;
+ }
if ( !dev_priv || dev_priv->is_pci ) {
DRM_ERROR( "%s called with a PCI card\n", __FUNCTION__ );
return -EINVAL;
@@ -1458,8 +1423,7 @@ int radeon_cp_indirect( struct inode *inode, struct file *filp,
return -EINVAL;
}
- RING_SPACE_TEST_WITH_RETURN( dev_priv );
- VB_AGE_TEST_WITH_RETURN( dev_priv );
+ VB_AGE_CHECK_WITH_RET( dev_priv );
buf->used = indirect.end;
buf_priv->discard = indirect.discard;