From 9d0f33b178abb871f07c88a928f3fb288664574a Mon Sep 17 00:00:00 2001
From: Thomas Hellstrom
Date: Mon, 4 May 2009 22:43:56 +0200
Subject: ttm: Replace the ttm_buffer_object::mutex with a spinlock.

Signed-off-by: Thomas Hellstrom
---
 linux-core/openchrome/via_execbuf.c |  11 +--
 linux-core/ttm/ttm_bo.c             | 186 ++++++++++++++----------------------
 linux-core/ttm/ttm_bo_api.h         |  21 ++--
 linux-core/ttm/ttm_bo_util.c        |  28 ++++--
 linux-core/ttm/ttm_bo_vm.c          |  18 ++--
 linux-core/ttm/ttm_execbuf_util.c   |   4 +-
 linux-core/ttm/ttm_placement_user.c |  19 ++--
 7 files changed, 128 insertions(+), 159 deletions(-)

diff --git a/linux-core/openchrome/via_execbuf.c b/linux-core/openchrome/via_execbuf.c
index 12009df7..468c9c23 100644
--- a/linux-core/openchrome/via_execbuf.c
+++ b/linux-core/openchrome/via_execbuf.c
@@ -551,7 +551,6 @@ static int via_validate_buffer_list(struct drm_file *file_priv,
 		item->ret = 0;
 		req = &item->req;
 
-		mutex_lock(&bo->mutex);
 		ret = via_placement_fence_type(bo,
 					       req->set_flags,
 					       req->clear_flags,
@@ -570,7 +569,6 @@ static int via_validate_buffer_list(struct drm_file *file_priv,
 
 		item->offset = bo->offset;
 		item->flags = bo->mem.placement;
-		mutex_unlock(&bo->mutex);
 
 		ret = via_check_presumed(&item->req, bo, item->user_val_arg,
 					 &item->po_correct);
@@ -587,7 +585,6 @@ static int via_validate_buffer_list(struct drm_file *file_priv,
 	return 0;
 
 out_err:
-	mutex_unlock(&bo->mutex);
 	item->ret = ret;
 	return ret;
 }
@@ -612,13 +609,13 @@ static int via_handle_copyback(struct drm_device *dev,
 		if (!arg.ret) {
 			struct ttm_buffer_object *bo = entry->bo;
 
-			mutex_lock(&bo->mutex);
-			arg.d.rep.gpu_offset = bo->offset;
-			arg.d.rep.placement = bo->mem.placement;
+			arg.d.rep.gpu_offset = vbuf->offset;
+			arg.d.rep.placement = vbuf->flags;
+			spin_lock(&bo->lock);
 			arg.d.rep.fence_type_mask =
 			    (uint32_t) (unsigned long)
 			    entry->new_sync_obj_arg;
-			mutex_unlock(&bo->mutex);
+			spin_unlock(&bo->lock);
 		}
 
 		if (__copy_to_user(vbuf->user_val_arg,
diff --git a/linux-core/ttm/ttm_bo.c b/linux-core/ttm/ttm_bo.c
index cae883e2..4f859d56 100644
--- a/linux-core/ttm/ttm_bo.c
+++ b/linux-core/ttm/ttm_bo.c
@@ -320,9 +320,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 		bo->evicted = false;
 	}
 
-	if (bo->mem.mm_node)
+	if (bo->mem.mm_node) {
+		spin_lock(&bo->lock);
 		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
 		    bdev->man[bo->mem.mem_type].gpu_offset;
+		bo->cur_placement = bo->mem.placement;
+		spin_unlock(&bo->lock);
+	}
 
 	return 0;
 
@@ -337,37 +341,6 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 	return ret;
 }
 
-static int ttm_bo_expire_sync_obj(struct ttm_buffer_object *bo,
-				  bool allow_errors)
-{
-	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_driver *driver = bdev->driver;
-
-	if (bo->sync_obj) {
-		if (bdev->nice_mode) {
-			unsigned long _end = jiffies + 3 * HZ;
-			int ret;
-			do {
-				ret = ttm_bo_wait(bo, false, false, false);
-				if (ret && allow_errors)
-					return ret;
-
-			} while (ret && !time_after_eq(jiffies, _end));
-
-			if (bo->sync_obj) {
-				bdev->nice_mode = false;
-				printk(KERN_ERR "Detected probable GPU lockup. "
-				       "Evicting buffer.\n");
-			}
-		}
-		if (bo->sync_obj) {
-			driver->sync_obj_unref(&bo->sync_obj);
-			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-		}
-	}
-	return 0;
-}
-
 /**
  * If bo idle, remove from delayed- and lru lists, and unref.
  * If not idle, and already on delayed list, do nothing.
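The ttm_bo_handle_move_mem() hunk above pins down the rule that makes the conversion work: bo->offset and the new bo->cur_placement hint are written with both the reservation and bo->lock held, so a reader holding either lock sees consistent values. A minimal userspace model of that rule, using pthreads stand-ins rather than TTM code (struct buffer, buffer_move() and buffer_read_offset() are invented names):

/*
 * Writers take BOTH the reservation and the spinlock; readers may take
 * EITHER.  Build with: cc -pthread model.c
 */
#include <pthread.h>
#include <stdio.h>

struct buffer {
	pthread_mutex_t reserve;	/* stand-in for ttm_bo_reserve() */
	pthread_spinlock_t lock;	/* stand-in for bo->lock */
	unsigned long offset;		/* written under both, read under either */
	unsigned int placement;
};

static void buffer_move(struct buffer *buf, unsigned long ofs, unsigned int pl)
{
	pthread_mutex_lock(&buf->reserve);	/* writer: reservation held... */
	pthread_spin_lock(&buf->lock);		/* ...plus the spinlock */
	buf->offset = ofs;
	buf->placement = pl;
	pthread_spin_unlock(&buf->lock);
	pthread_mutex_unlock(&buf->reserve);
}

static unsigned long buffer_read_offset(struct buffer *buf)
{
	unsigned long ofs;

	pthread_spin_lock(&buf->lock);		/* reader: the cheap lock suffices */
	ofs = buf->offset;
	pthread_spin_unlock(&buf->lock);
	return ofs;
}

int main(void)
{
	struct buffer buf = { .offset = 0 };

	pthread_mutex_init(&buf.reserve, NULL);
	pthread_spin_init(&buf.lock, PTHREAD_PROCESS_PRIVATE);
	buffer_move(&buf, 0x10000, 1);
	printf("offset %#lx\n", buffer_read_offset(&buf));
	return 0;
}

The payoff of the dual-lock write is visible throughout the rest of the patch: hot paths such as the fault handler and the ioctl copyback only ever need the spinlock, while buffer movers already hold the reservation anyway.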
@@ -381,23 +354,20 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 	struct ttm_bo_driver *driver = bdev->driver;
 	int ret;
 
-	mutex_lock(&bo->mutex);
-
-	if (bo->sync_obj && driver->sync_obj_signaled(bo->sync_obj,
-						      bo->sync_obj_arg)) {
-		driver->sync_obj_unref(&bo->sync_obj);
-		clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-	}
-
-	if (bo->sync_obj && remove_all)
-		(void)ttm_bo_expire_sync_obj(bo, false);
+	spin_lock(&bo->lock);
+	(void) ttm_bo_wait(bo, false, false, !remove_all);
 
 	if (!bo->sync_obj) {
 		int put_count;
 
+		spin_unlock(&bo->lock);
+
+		spin_lock(&bdev->lru_lock);
+		ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
+		BUG_ON(ret);
 		if (bo->ttm)
 			ttm_tt_unbind(bo->ttm);
-		spin_lock(&bdev->lru_lock);
+
 		if (!list_empty(&bo->ddestroy)) {
 			list_del_init(&bo->ddestroy);
 			kref_put(&bo->list_kref, ttm_bo_ref_bug);
@@ -408,7 +378,9 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 		}
 		put_count = ttm_bo_del_from_lru(bo);
 		spin_unlock(&bdev->lru_lock);
-		mutex_unlock(&bo->mutex);
+
+		atomic_set(&bo->reserved, 0);
+
 		while (put_count--)
 			kref_put(&bo->list_kref, ttm_bo_release_list);
 
@@ -417,22 +389,26 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 
 	spin_lock(&bdev->lru_lock);
 	if (list_empty(&bo->ddestroy)) {
+		void *sync_obj = bo->sync_obj;
+		void *sync_obj_arg = bo->sync_obj_arg;
+
+		kref_get(&bo->list_kref);
+		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 		spin_unlock(&bdev->lru_lock);
-		driver->sync_obj_flush(bo->sync_obj, bo->sync_obj_arg);
-		spin_lock(&bdev->lru_lock);
-		if (list_empty(&bo->ddestroy)) {
-			kref_get(&bo->list_kref);
-			list_add_tail(&bo->ddestroy, &bdev->ddestroy);
-		}
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&bo->lock);
+
+		if (sync_obj)
+			driver->sync_obj_flush(sync_obj, sync_obj_arg);
 		schedule_delayed_work(&bdev->wq,
 				      ((HZ / 100) < 1) ? 1 : HZ / 100);
 		ret = 0;
+
 	} else {
 		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&bo->lock);
 		ret = -EBUSY;
 	}
-	mutex_unlock(&bo->mutex);
+
 	return ret;
 }
 
@@ -542,7 +518,10 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
 	if (bo->mem.mem_type != mem_type)
 		goto out;
 
+	spin_lock(&bo->lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+	spin_unlock(&bo->lock);
+
 	if (ret && ret != -ERESTART) {
 		printk(KERN_ERR "Failed to expire sync object before "
 		       "buffer eviction.\n");
@@ -637,9 +616,7 @@ static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
 		while (put_count--)
 			kref_put(&entry->list_kref, ttm_bo_ref_bug);
 
-		mutex_lock(&entry->mutex);
 		ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
-		mutex_unlock(&entry->mutex);
 
 		ttm_bo_unreserve(entry);
 
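With the mutex gone, ttm_bo_cleanup_refs() decides the buffer's fate under bo->lock: ttm_bo_wait() is non-blocking when remove_all is false and blocking when it is true; an idle buffer is reserved under the LRU lock and torn down immediately, while a busy one is flushed and parked on the delayed-destroy list. A compressed userspace sketch of that decision, with invented stand-ins (bo->busy models an unsignaled sync_obj, on_ddestroy models the list membership):

#include <errno.h>
#include <pthread.h>

struct bo {
	pthread_spinlock_t lock;
	int busy;		/* stand-in for an unsignaled bo->sync_obj */
	int on_ddestroy;	/* stand-in for !list_empty(&bo->ddestroy) */
};

/* Stand-in for ttm_bo_wait(); !remove_all maps to its no_wait argument. */
static int bo_wait(struct bo *bo, int no_wait)
{
	if (bo->busy && no_wait)
		return -EBUSY;
	bo->busy = 0;		/* a real wait would drop bo->lock and sleep */
	return 0;
}

static int bo_cleanup(struct bo *bo, int remove_all)
{
	pthread_spin_lock(&bo->lock);
	(void) bo_wait(bo, !remove_all);

	if (!bo->busy) {
		pthread_spin_unlock(&bo->lock);
		/* reserve, unbind, drop the list refs: immediate destruction */
		return 0;
	}

	/* Still busy: flush, queue on the delayed-destroy list, retry later. */
	bo->on_ddestroy = 1;
	pthread_spin_unlock(&bo->lock);
	return -EBUSY;
}

int main(void)
{
	struct bo bo = { .busy = 1, .on_ddestroy = 0 };

	pthread_spin_init(&bo.lock, PTHREAD_PROCESS_PRIVATE);
	return bo_cleanup(&bo, 0) == -EBUSY ? 0 : 1;
}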
-/*
- * Call bo->mutex locked.
- * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
- */
-
-static int ttm_bo_busy(struct ttm_buffer_object *bo)
-{
-	void *sync_obj = bo->sync_obj;
-	struct ttm_bo_driver *driver = bo->bdev->driver;
-
-	if (sync_obj) {
-		if (driver->sync_obj_signaled(sync_obj, bo->sync_obj_arg)) {
-			driver->sync_obj_unref(&bo->sync_obj);
-			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-			return 0;
-		}
-		driver->sync_obj_flush(sync_obj, bo->sync_obj_arg);
-		if (driver->sync_obj_signaled(sync_obj, bo->sync_obj_arg)) {
-			driver->sync_obj_unref(&bo->sync_obj);
-			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-			return 0;
-		}
-		return 1;
-	}
-	return 0;
-}
-
 int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
 {
 	int ret = 0;
@@ -866,8 +816,10 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	 * instead of doing it here.
 	 */
 
-	ttm_bo_busy(bo);
+	spin_lock(&bo->lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+	spin_unlock(&bo->lock);
+
 	if (ret)
 		return ret;
 
@@ -1011,8 +963,7 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
 	}
 	bo->destroy = destroy;
 
-	mutex_init(&bo->mutex);
-	mutex_lock(&bo->mutex);
+	spin_lock_init(&bo->lock);
 	kref_init(&bo->kref);
 	kref_init(&bo->list_kref);
 	atomic_set(&bo->cpu_writers, 0);
@@ -1061,12 +1012,10 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
 	if (ret)
 		goto out_err;
 
-	mutex_unlock(&bo->mutex);
 	ttm_bo_unreserve(bo);
 	return 0;
 
 out_err:
-	mutex_unlock(&bo->mutex);
 	ttm_bo_unreserve(bo);
 	ttm_bo_unref(&bo);
 
@@ -1124,10 +1073,11 @@ static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
 {
 	int ret;
 
-	mutex_lock(&bo->mutex);
+	spin_lock(&bo->lock);
+	ret = ttm_bo_wait(bo, false, false, false);
+	spin_unlock(&bo->lock);
 
-	ret = ttm_bo_expire_sync_obj(bo, allow_errors);
-	if (ret)
+	if (ret && allow_errors)
 		goto out;
 
 	if (bo->mem.mem_type == mem_type)
@@ -1143,7 +1093,6 @@ static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
 	}
 
 out:
-	mutex_unlock(&bo->mutex);
 	return ret;
 }
 
@@ -1522,36 +1471,48 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 	void *sync_obj;
 	void *sync_obj_arg;
 	int ret = 0;
+
+	if (likely(bo->sync_obj == NULL))
+		return 0;
 
 	while (bo->sync_obj) {
+
 		if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
-			driver->sync_obj_unref(&bo->sync_obj);
+			void *tmp_obj = bo->sync_obj;
+			bo->sync_obj = NULL;
 			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-			goto out;
-		}
-		if (no_wait) {
-			ret = -EBUSY;
-			goto out;
+			spin_unlock(&bo->lock);
+			driver->sync_obj_unref(&tmp_obj);
+			spin_lock(&bo->lock);
+			continue;
 		}
+
+		if (no_wait)
+			return -EBUSY;
+
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 		sync_obj_arg = bo->sync_obj_arg;
-		mutex_unlock(&bo->mutex);
+		spin_unlock(&bo->lock);
 		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
 					    lazy, interruptible);
-
-		mutex_lock(&bo->mutex);
 		if (unlikely(ret != 0)) {
 			driver->sync_obj_unref(&sync_obj);
+			spin_lock(&bo->lock);
 			return ret;
 		}
-
-		if (bo->sync_obj == sync_obj) {
-			driver->sync_obj_unref(&bo->sync_obj);
-			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+		spin_lock(&bo->lock);
+		if (likely(bo->sync_obj == sync_obj &&
+			   bo->sync_obj_arg == sync_obj_arg)) {
+			void *tmp_obj = bo->sync_obj;
+			bo->sync_obj = NULL;
+			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
+				  &bo->priv_flags);
+			spin_unlock(&bo->lock);
+			driver->sync_obj_unref(&sync_obj);
+			driver->sync_obj_unref(&tmp_obj);
+			spin_lock(&bo->lock);
 		}
-		driver->sync_obj_unref(&sync_obj);
 	}
-out:
 	return 0;
 }
 
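The rewritten ttm_bo_wait() above is the heart of the patch: a spinlock, unlike the old mutex, must not be held across the blocking sync_obj_wait(), nor across sync_obj_unref(), which may itself sleep in a driver. The loop therefore takes its own reference to the fence, drops bo->lock, waits, retakes the lock and re-checks that the buffer still carries the same fence before clearing it. A self-contained userspace rendering of that shape follows; struct fence and fence_get/put/wait are invented stand-ins for the driver sync-object hooks, and, unlike the hunk above, the sketch also drops the loop reference when the fence turns out to have been replaced:

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct fence {
	atomic_int refs;
	pthread_mutex_t mu;
	pthread_cond_t cv;
	int signaled;
};

struct bo {
	pthread_spinlock_t lock;
	struct fence *fence;		/* stand-in for bo->sync_obj */
};

static struct fence *fence_get(struct fence *f)
{
	atomic_fetch_add(&f->refs, 1);
	return f;
}

static void fence_put(struct fence *f)	/* may block in a real driver */
{
	if (atomic_fetch_sub(&f->refs, 1) == 1) {
		pthread_mutex_destroy(&f->mu);
		pthread_cond_destroy(&f->cv);
		free(f);
	}
}

static void fence_wait(struct fence *f)	/* blocking */
{
	pthread_mutex_lock(&f->mu);
	while (!f->signaled)
		pthread_cond_wait(&f->cv, &f->mu);
	pthread_mutex_unlock(&f->mu);
}

/* Called and returns with bo->lock held; bo is idle on return. */
static void bo_wait_idle(struct bo *bo)
{
	while (bo->fence) {
		struct fence *f = fence_get(bo->fence);

		pthread_spin_unlock(&bo->lock);	/* never sleep under a spinlock */
		fence_wait(f);
		pthread_spin_lock(&bo->lock);

		if (bo->fence == f) {		/* re-check: it may have changed */
			bo->fence = NULL;
			pthread_spin_unlock(&bo->lock);
			fence_put(f);		/* the buffer's reference */
		} else {
			pthread_spin_unlock(&bo->lock);
		}
		fence_put(f);			/* our loop reference */
		pthread_spin_lock(&bo->lock);
	}
}

int main(void)
{
	struct bo bo;
	struct fence *f = calloc(1, sizeof(*f));

	atomic_init(&f->refs, 1);
	pthread_mutex_init(&f->mu, NULL);
	pthread_cond_init(&f->cv, NULL);
	f->signaled = 1;			/* GPU already idle */

	pthread_spin_init(&bo.lock, PTHREAD_PROCESS_PRIVATE);
	bo.fence = f;
	pthread_spin_lock(&bo.lock);
	bo_wait_idle(&bo);
	pthread_spin_unlock(&bo.lock);
	return 0;
}

The re-check after retaking the lock is what makes the dropped-lock window safe: another thread may have attached a new fence in the meantime, in which case only the loop reference may be dropped and the wait must start over.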
@@ -1594,13 +1555,11 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
 	if (unlikely(ret != 0))
 		return ret;
-	mutex_lock(&bo->mutex);
+	spin_lock(&bo->lock);
 	ret = ttm_bo_wait(bo, false, true, no_wait);
-	if (unlikely(ret != 0))
-		goto out_err0;
-	atomic_inc(&bo->cpu_writers);
-out_err0:
-	mutex_unlock(&bo->mutex);
+	spin_unlock(&bo->lock);
+	if (likely(ret == 0))
+		atomic_inc(&bo->cpu_writers);
 	ttm_bo_unreserve(bo);
 	return ret;
 }
@@ -1662,8 +1621,10 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 	 * Wait for GPU, then move to system cached.
 	 */
 
-	mutex_lock(&bo->mutex);
+	spin_lock(&bo->lock);
 	ret = ttm_bo_wait(bo, false, false, false);
+	spin_unlock(&bo->lock);
+
 	if (unlikely(ret != 0))
 		goto out;
 
@@ -1689,7 +1650,6 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 	ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
 out:
 
-	mutex_unlock(&bo->mutex);
 
 	/**
 	 *
diff --git a/linux-core/ttm/ttm_bo_api.h b/linux-core/ttm/ttm_bo_api.h
index 4f2424a0..04a17601 100644
--- a/linux-core/ttm/ttm_bo_api.h
+++ b/linux-core/ttm/ttm_bo_api.h
@@ -110,14 +110,11 @@ struct ttm_tt;
  * keeps one refcount. When this refcount reaches zero,
  * the object is destroyed.
  * @event_queue: Queue for processes waiting on buffer object status change.
- * @mutex: Lock protecting all members with the exception of constant members
- * and list heads. We should really use a spinlock here.
+ * @lock: spinlock protecting mostly synchronization members.
  * @proposed_placement: Proposed placement for the buffer. Changed only by the
 * creator prior to validation as opposed to bo->mem.proposed_flags which is
 * changed by the implementation prior to a buffer move if it wants to outsmart
 * the buffer creator / user. This latter happens, for example, at eviction.
- * @offset: The current GPU offset, which can have different meanings
- * depending on the memory type. For SYSTEM type memory, it should be 0.
  * @mem: structure describing current placement.
  * @persistant_swap_storage: Usually the swap storage is deleted for buffers
  * pinned in physical memory. If this behaviour is not desired, this member
@@ -139,6 +136,9 @@ struct ttm_tt;
  * @priv_flags: Flags describing buffer object internal state.
  * @vm_rb: Rb node for the vm rb tree.
  * @vm_node: Address space manager node.
+ * @offset: The current GPU offset, which can have different meanings
+ * depending on the memory type. For SYSTEM type memory, it should be 0.
+ * @cur_placement: Hint of current placement.
  *
  * Base class for TTM buffer object, that deals with data placement and CPU
  * mappings. GPU mappings are really up to the driver, but for simpler GPUs
@@ -172,14 +172,13 @@ struct ttm_buffer_object {
 	struct kref kref;
 	struct kref list_kref;
 	wait_queue_head_t event_queue;
-	struct mutex mutex;
+	spinlock_t lock;
 
 	/**
 	 * Members protected by the bo::reserved lock.
 	 */
 
 	uint32_t proposed_placement;
-	unsigned long offset;
 	struct ttm_mem_reg mem;
 	struct file *persistant_swap_storage;
 	struct ttm_tt *ttm;
@@ -210,7 +209,7 @@ struct ttm_buffer_object {
 
 
 	/**
-	 * Members protected by the bo::mutex
+	 * Members protected by the bo::lock
 	 */
 
 	void *sync_obj_arg;
@@ -225,6 +224,14 @@ struct ttm_buffer_object {
 
 	struct drm_mm_node *vm_node;
 
+	/**
+	 * Special members that are protected by the reserve lock
+	 * and the bo::lock when written to. Can be read with
+	 * either of these locks held.
+	 */
+
+	unsigned long offset;
+	uint32_t cur_placement;
 };
 
 /**
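ttm_bo_synccpu_write_grab() above shows the companion idiom: the idle check runs under bo->lock, but the side effect happens after the lock is dropped, which is safe because cpu_writers is an atomic counter. In isolation, with userspace stand-ins again (bo_check_idle() models the no_wait flavour of ttm_bo_wait(); none of these names are TTM API):

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>

struct bo {
	pthread_spinlock_t lock;
	int busy;			/* stand-in for an unsignaled sync_obj */
	atomic_int cpu_writers;		/* stand-in for bo->cpu_writers */
};

/* no_wait flavour of the wait: just report, never sleep. */
static int bo_check_idle(struct bo *bo)
{
	return bo->busy ? -EBUSY : 0;
}

static int bo_synccpu_write_grab(struct bo *bo)
{
	int ret;

	pthread_spin_lock(&bo->lock);
	ret = bo_check_idle(bo);
	pthread_spin_unlock(&bo->lock);

	if (ret == 0)			/* atomic counter: no lock needed here */
		atomic_fetch_add(&bo->cpu_writers, 1);
	return ret;
}

static void bo_synccpu_write_release(struct bo *bo)
{
	atomic_fetch_sub(&bo->cpu_writers, 1);
}

int main(void)
{
	struct bo bo = { .busy = 0 };

	pthread_spin_init(&bo.lock, PTHREAD_PROCESS_PRIVATE);
	if (bo_synccpu_write_grab(&bo) == 0)
		bo_synccpu_write_release(&bo);
	return 0;
}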
diff --git a/linux-core/ttm/ttm_bo_util.c b/linux-core/ttm/ttm_bo_util.c
index d53474f2..278c24ba 100644
--- a/linux-core/ttm/ttm_bo_util.c
+++ b/linux-core/ttm/ttm_bo_util.c
@@ -287,8 +287,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	 * TODO: Explicit member copy would probably be better here.
 	 */
 
-	mutex_init(&fbo->mutex);
-	mutex_lock(&fbo->mutex);
+	spin_lock_init(&fbo->lock);
 	init_waitqueue_head(&fbo->event_queue);
 	INIT_LIST_HEAD(&fbo->ddestroy);
 	INIT_LIST_HEAD(&fbo->lru);
@@ -302,8 +301,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	kref_init(&fbo->kref);
 	fbo->destroy = &ttm_transfered_destroy;
 
-	mutex_unlock(&fbo->mutex);
-
 	*new_obj = fbo;
 	return 0;
 }
@@ -497,17 +494,28 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	int ret;
 	uint32_t save_flags = old_mem->placement;
 	struct ttm_buffer_object *ghost_obj;
 
-	if (bo->sync_obj)
-		driver->sync_obj_unref(&bo->sync_obj);
+	void *tmp_obj = NULL;
+
+	spin_lock(&bo->lock);
+	if (bo->sync_obj) {
+		tmp_obj = bo->sync_obj;
+		bo->sync_obj = NULL;
+	}
 	bo->sync_obj = driver->sync_obj_ref(sync_obj);
 	bo->sync_obj_arg = sync_obj_arg;
-	if (evict) {
+	if (evict) {
 		ret = ttm_bo_wait(bo, false, false, false);
+		spin_unlock(&bo->lock);
+		driver->sync_obj_unref(&bo->sync_obj);
+
 		if (ret)
 			return ret;
+
 		ttm_bo_free_old_node(bo);
 		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm != NULL)) {
-			ttm_tt_unbind(bo->ttm); ttm_tt_destroy(bo->ttm); bo->ttm = NULL;
+			ttm_tt_unbind(bo->ttm);
+			ttm_tt_destroy(bo->ttm);
+			bo->ttm = NULL;
 		}
 	} else {
@@ -518,6 +526,9 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 		 * operation has completed.
 		 */
 
+		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+		spin_unlock(&bo->lock);
+
 		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
 		if (ret)
 			return ret;
@@ -533,7 +544,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 		else
 			bo->ttm = NULL;
 
-		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
 		ttm_bo_unreserve(ghost_obj);
 		ttm_bo_unref(&ghost_obj);
 	}
diff --git a/linux-core/ttm/ttm_bo_vm.c b/linux-core/ttm/ttm_bo_vm.c
index 7df98589..8fd21908 100644
--- a/linux-core/ttm/ttm_bo_vm.c
+++ b/linux-core/ttm/ttm_bo_vm.c
@@ -108,21 +108,23 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		return VM_FAULT_NOPAGE;
 	}
 
-	mutex_lock(&bo->mutex);
-
 	/*
 	 * Wait for buffer data in transit, due to a pipelined
 	 * move.
 	 */
 
+	spin_lock(&bo->lock);
 	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
 		ret = ttm_bo_wait(bo, false, true, false);
+		spin_unlock(&bo->lock);
 		if (unlikely(ret != 0)) {
 			retval = (ret != -ERESTART) ?
 			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
 			goto out_unlock;
 		}
-	}
+	} else
+		spin_unlock(&bo->lock);
+
 
 	ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base,
 				&bus_offset, &bus_size);
@@ -213,7 +215,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 out_unlock:
-	mutex_unlock(&bo->mutex);
 	ttm_bo_unreserve(bo);
 	return retval;
 }
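ttm_bo_move_accel_cleanup() above and ttm_eu_fence_buffer_objects() further down obey the same rule when attaching a new fence: the pointer swap happens under bo->lock, and the unref of the old fence is deferred until the lock has been dropped, because sync_obj_unref() may block. The pattern in isolation, with a refcounted stand-in fence (invented names, not TTM API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct fence {
	atomic_int refs;
};

static struct fence *fence_get(struct fence *f)
{
	atomic_fetch_add(&f->refs, 1);
	return f;
}

static void fence_put(struct fence *f)	/* may block in a real driver */
{
	if (atomic_fetch_sub(&f->refs, 1) == 1)
		free(f);
}

struct bo {
	pthread_spinlock_t lock;
	struct fence *fence;		/* stand-in for bo->sync_obj */
};

static void bo_refence(struct bo *bo, struct fence *new_fence)
{
	struct fence *old;

	pthread_spin_lock(&bo->lock);
	old = bo->fence;		/* swap under the spinlock... */
	bo->fence = fence_get(new_fence);
	pthread_spin_unlock(&bo->lock);

	if (old)
		fence_put(old);		/* ...unref outside of it */
}

int main(void)
{
	struct bo bo = { .fence = NULL };
	struct fence *f = malloc(sizeof(*f));

	atomic_init(&f->refs, 1);
	pthread_spin_init(&bo.lock, PTHREAD_PROCESS_PRIVATE);
	bo_refence(&bo, f);	/* the bo now holds its own reference */
	fence_put(f);		/* drop ours; the bo keeps the fence alive */
	return 0;
}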
@@ -252,21 +253,21 @@ static unsigned long ttm_bo_vm_nopfn(struct vm_area_struct *vma,
 		return NOPFN_REFAULT;
 	}
 
-	mutex_lock(&bo->mutex);
-
 	/*
 	 * Wait for buffer data in transit, due to a pipelined
 	 * move.
 	 */
-
+	spin_lock(&bo->lock);
 	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
 		ret = ttm_bo_wait(bo, false, true, false);
+		spin_unlock(&bo->lock);
 		if (unlikely(ret != 0)) {
 			retval = (ret != -ERESTART) ?
 			    NOPFN_SIGBUS : NOPFN_REFAULT;
 			goto out_unlock;
 		}
-	}
+	} else
+		spin_unlock(&bo->lock);
 
 	ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base,
 				&bus_offset, &bus_size);
@@ -360,7 +361,6 @@ static unsigned long ttm_bo_vm_nopfn(struct vm_area_struct *vma,
 	}
 
 out_unlock:
-	mutex_unlock(&bo->mutex);
 	ttm_bo_unreserve(bo);
 	return retval;
 }
diff --git a/linux-core/ttm/ttm_execbuf_util.c b/linux-core/ttm/ttm_execbuf_util.c
index 4a34c18c..09495c71 100644
--- a/linux-core/ttm/ttm_execbuf_util.c
+++ b/linux-core/ttm/ttm_execbuf_util.c
@@ -102,11 +102,11 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
 		struct ttm_bo_driver *driver = bo->bdev->driver;
 		void *old_sync_obj;
 
-		mutex_lock(&bo->mutex);
+		spin_lock(&bo->lock);
 		old_sync_obj = bo->sync_obj;
 		bo->sync_obj = driver->sync_obj_ref(sync_obj);
 		bo->sync_obj_arg = entry->new_sync_obj_arg;
-		mutex_unlock(&bo->mutex);
+		spin_unlock(&bo->lock);
 		ttm_bo_unreserve(bo);
 		entry->reserved = false;
 		if (old_sync_obj)
diff --git a/linux-core/ttm/ttm_placement_user.c b/linux-core/ttm/ttm_placement_user.c
index 87df2c7c..93815a46 100644
--- a/linux-core/ttm/ttm_placement_user.c
+++ b/linux-core/ttm/ttm_placement_user.c
@@ -139,12 +139,15 @@ static void ttm_pl_fill_rep(struct ttm_buffer_object *bo,
 	struct ttm_bo_user_object *user_bo =
 	    container_of(bo, struct ttm_bo_user_object, bo);
 
-	rep->gpu_offset = bo->offset;
 	rep->bo_size = bo->num_pages << PAGE_SHIFT;
 	rep->map_handle = bo->addr_space_offset;
-	rep->placement = bo->mem.placement;
 	rep->handle = user_bo->base.hash.key;
+
+	spin_lock(&bo->lock);
+	rep->placement = bo->cur_placement;
+	rep->gpu_offset = bo->offset;
 	rep->sync_object_arg = (uint32_t) (unsigned long)bo->sync_obj_arg;
+	spin_unlock(&bo->lock);
 }
 
 int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
@@ -204,9 +207,7 @@ int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
 	if (unlikely(ret != 0))
 		goto out_err;
 
-	mutex_lock(&bo->mutex);
 	ttm_pl_fill_rep(bo, rep);
-	mutex_unlock(&bo->mutex);
 	ttm_bo_unref(&bo);
 out:
 	return 0;
@@ -270,9 +271,7 @@ int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
 	if (unlikely(ret != 0))
 		goto out_err;
 
-	mutex_lock(&bo->mutex);
 	ttm_pl_fill_rep(bo, rep);
-	mutex_unlock(&bo->mutex);
 	ttm_bo_unref(&bo);
 out:
 	return ret;
@@ -305,9 +304,7 @@ int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data)
 		goto out;
 	}
 
-	mutex_lock(&bo->mutex);
 	ttm_pl_fill_rep(bo, rep);
-	mutex_unlock(&bo->mutex);
 
 out:
 	base = &user_bo->base;
@@ -397,7 +394,6 @@ int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
 	if (unlikely(ret != 0))
 		goto out_err2;
 
-	mutex_lock(&bo->mutex);
 	ret = ttm_bo_check_placement(bo, req->set_placement,
 				     req->clr_placement);
 	if (unlikely(ret != 0))
@@ -411,7 +407,6 @@ int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
 	ttm_pl_fill_rep(bo, rep);
 
 out_err2:
-	mutex_unlock(&bo->mutex);
 	ttm_bo_unreserve(bo);
 out_err1:
 	ttm_read_unlock(lock);
@@ -437,11 +432,11 @@ int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data)
 			  arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
 	if (unlikely(ret != 0))
 		goto out;
-	mutex_lock(&bo->mutex);
+	spin_lock(&bo->lock);
 	ret = ttm_bo_wait(bo,
 			  arg->mode & TTM_PL_WAITIDLE_MODE_LAZY,
 			  true, arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
-	mutex_unlock(&bo->mutex);
+	spin_unlock(&bo->lock);
 	ttm_bo_unblock_reservation(bo);
 out:
 	ttm_bo_unref(&bo);
-- 
cgit v1.2.3