author     Chris Wilson <chris@chris-wilson.co.uk>	2019-01-14 14:21:22 +0000
committer  Chris Wilson <chris@chris-wilson.co.uk>	2019-01-14 16:18:20 +0000
commit     c9d08cc3e3393e19162cb2cfaa1f454baf2aaffe (patch)
tree       ae511b0a4e61b3eda243ea9ea0c978ab15a58cd4 /drivers/gpu/drm/i915/selftests/i915_gem_object.c
parent     2cb2cb5ff41abd92d6a7bfb1459b25974fa6d509 (diff)
drm/i915/selftests: Mark up rpm wakerefs
Track the temporary wakerefs used within the selftests so that leaks are
clear.

v2: Add a couple of coarse annotations for mock selftests as we now loudly
warn about the errors.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jani Nikula <jani.nikula@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-14-chris@chris-wilson.co.uk
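The change is one mechanical conversion: keep the cookie returned by
intel_runtime_pm_get() and hand it back to intel_runtime_pm_put(), instead of
discarding it and pairing the get with intel_runtime_pm_put_unchecked(). The
fragment below is a minimal illustrative sketch of that pattern, not code
taken from the patch; do_rpm_protected_work() is a placeholder name.

	intel_wakeref_t wakeref;

	/* Record the wakeref cookie so an unreleased get is reported as a leak. */
	wakeref = intel_runtime_pm_get(i915);

	do_rpm_protected_work(i915);	/* placeholder for the section needing rpm */

	/* Release the same tracked wakeref that was acquired above. */
	intel_runtime_pm_put(i915, wakeref);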
Diffstat (limited to 'drivers/gpu/drm/i915/selftests/i915_gem_object.c')
-rw-r--r--	drivers/gpu/drm/i915/selftests/i915_gem_object.c	18
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index b03890c590d7..3575e1387c3f 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -308,6 +308,7 @@ static int igt_partial_tiling(void *arg)
const unsigned int nreal = 1 << 12; /* largest tile row x2 */
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref;
int tiling;
int err;
@@ -333,7 +334,7 @@ static int igt_partial_tiling(void *arg)
}
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
if (1) {
IGT_TIMEOUT(end);
@@ -444,7 +445,7 @@ next_tiling: ;
}
out_unlock:
- intel_runtime_pm_put_unchecked(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_object_unpin_pages(obj);
out:
@@ -506,11 +507,14 @@ static void disable_retire_worker(struct drm_i915_private *i915)
mutex_lock(&i915->drm.struct_mutex);
if (!i915->gt.active_requests++) {
- intel_runtime_pm_get(i915);
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_runtime_pm_get(i915);
i915_gem_unpark(i915);
- intel_runtime_pm_put_unchecked(i915);
+ intel_runtime_pm_put(i915, wakeref);
}
mutex_unlock(&i915->drm.struct_mutex);
+
cancel_delayed_work_sync(&i915->gt.retire_work);
cancel_delayed_work_sync(&i915->gt.idle_work);
}
@@ -578,6 +582,8 @@ static int igt_mmap_offset_exhaustion(void *arg)
/* Now fill with busy dead objects that we expect to reap */
for (loop = 0; loop < 3; loop++) {
+ intel_wakeref_t wakeref;
+
if (i915_terminally_wedged(&i915->gpu_error))
break;
@@ -588,9 +594,9 @@ static int igt_mmap_offset_exhaustion(void *arg)
}
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
err = make_obj_busy(obj);
- intel_runtime_pm_put_unchecked(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
if (err) {
pr_err("[loop %d] Failed to busy the object\n", loop);