From d4225a535b3b086868ce1f82dc0593d85d04dae8 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 14 Jan 2019 14:21:23 +0000 Subject: drm/i915: Syntatic sugar for using intel_runtime_pm Frequently, we use intel_runtime_pm_get/_put around a small block. Formalise that usage by providing a macro to define such a block with an automatic closure to scope the intel_runtime_pm wakeref to that block, i.e. macro abuse smelling of python. Signed-off-by: Chris Wilson Cc: Jani Nikula Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-15-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_debugfs.c | 162 +++++++++------------ drivers/gpu/drm/i915/i915_gem.c | 10 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 23 ++- drivers/gpu/drm/i915/i915_gem_shrinker.c | 51 ++++--- drivers/gpu/drm/i915/i915_pmu.c | 7 +- drivers/gpu/drm/i915/i915_sysfs.c | 7 +- drivers/gpu/drm/i915/intel_drv.h | 8 + drivers/gpu/drm/i915/intel_guc_log.c | 26 ++-- drivers/gpu/drm/i915/intel_huc.c | 7 +- drivers/gpu/drm/i915/intel_panel.c | 18 ++- drivers/gpu/drm/i915/intel_uncore.c | 30 ++-- drivers/gpu/drm/i915/selftests/i915_gem.c | 34 ++--- drivers/gpu/drm/i915/selftests/i915_gem_context.c | 12 +- drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 8 +- drivers/gpu/drm/i915/selftests/i915_gem_object.c | 11 +- drivers/gpu/drm/i915/selftests/i915_request.c | 8 +- drivers/gpu/drm/i915/selftests/intel_workarounds.c | 28 ++-- 17 files changed, 209 insertions(+), 241 deletions(-) (limited to 'drivers/gpu') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 66c520ba0df8..1c7913b40bb7 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -953,9 +953,9 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file) struct i915_gpu_state *gpu; intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(i915); - gpu = i915_capture_gpu_state(i915); - intel_runtime_pm_put(i915, wakeref); + gpu = NULL; + with_intel_runtime_pm(i915, wakeref) + gpu = i915_capture_gpu_state(i915); if (IS_ERR(gpu)) return PTR_ERR(gpu); @@ -1287,17 +1287,15 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) return 0; } - wakeref = intel_runtime_pm_get(dev_priv); + with_intel_runtime_pm(dev_priv, wakeref) { + for_each_engine(engine, dev_priv, id) { + acthd[id] = intel_engine_get_active_head(engine); + seqno[id] = intel_engine_get_seqno(engine); + } - for_each_engine(engine, dev_priv, id) { - acthd[id] = intel_engine_get_active_head(engine); - seqno[id] = intel_engine_get_seqno(engine); + intel_engine_get_instdone(dev_priv->engine[RCS], &instdone); } - intel_engine_get_instdone(dev_priv->engine[RCS], &instdone); - - intel_runtime_pm_put(dev_priv, wakeref); - if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer)) seq_printf(m, "Hangcheck active, timer fires in %dms\n", jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires - @@ -1573,18 +1571,16 @@ static int i915_drpc_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); intel_wakeref_t wakeref; - int err; - - wakeref = intel_runtime_pm_get(dev_priv); - - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - err = vlv_drpc_info(m); - else if (INTEL_GEN(dev_priv) >= 6) - err = gen6_drpc_info(m); - else - err = ironlake_drpc_info(m); + int err = -ENODEV; - intel_runtime_pm_put(dev_priv, wakeref); + with_intel_runtime_pm(dev_priv, wakeref) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + 
err = vlv_drpc_info(m); + else if (INTEL_GEN(dev_priv) >= 6) + err = gen6_drpc_info(m); + else + err = ironlake_drpc_info(m); + } return err; } @@ -2068,8 +2064,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) intel_wakeref_t wakeref; struct drm_file *file; - wakeref = intel_runtime_pm_get_if_in_use(dev_priv); - if (wakeref) { + with_intel_runtime_pm_if_in_use(dev_priv, wakeref) { if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { mutex_lock(&dev_priv->pcu_lock); act_freq = vlv_punit_read(dev_priv, @@ -2080,7 +2075,6 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) act_freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1)); } - intel_runtime_pm_put(dev_priv, wakeref); } seq_printf(m, "RPS enabled? %d\n", rps->enabled); @@ -2172,9 +2166,8 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data) p = drm_seq_file_printer(m); intel_uc_fw_dump(&dev_priv->huc.fw, &p); - wakeref = intel_runtime_pm_get(dev_priv); - seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2)); - intel_runtime_pm_put(dev_priv, wakeref); + with_intel_runtime_pm(dev_priv, wakeref) + seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2)); return 0; } @@ -2184,7 +2177,6 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data) struct drm_i915_private *dev_priv = node_to_i915(m->private); intel_wakeref_t wakeref; struct drm_printer p; - u32 tmp, i; if (!HAS_GUC(dev_priv)) return -ENODEV; @@ -2192,22 +2184,23 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data) p = drm_seq_file_printer(m); intel_uc_fw_dump(&dev_priv->guc.fw, &p); - wakeref = intel_runtime_pm_get(dev_priv); - - tmp = I915_READ(GUC_STATUS); - - seq_printf(m, "\nGuC status 0x%08x:\n", tmp); - seq_printf(m, "\tBootrom status = 0x%x\n", - (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT); - seq_printf(m, "\tuKernel status = 0x%x\n", - (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT); - seq_printf(m, "\tMIA Core status = 0x%x\n", - (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT); - seq_puts(m, "\nScratch registers:\n"); - for (i = 0; i < 16; i++) - seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i))); - - intel_runtime_pm_put(dev_priv, wakeref); + with_intel_runtime_pm(dev_priv, wakeref) { + u32 tmp = I915_READ(GUC_STATUS); + u32 i; + + seq_printf(m, "\nGuC status 0x%08x:\n", tmp); + seq_printf(m, "\tBootrom status = 0x%x\n", + (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT); + seq_printf(m, "\tuKernel status = 0x%x\n", + (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT); + seq_printf(m, "\tMIA Core status = 0x%x\n", + (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT); + seq_puts(m, "\nScratch registers:\n"); + for (i = 0; i < 16; i++) { + seq_printf(m, "\t%2d: \t0x%x\n", + i, I915_READ(SOFT_SCRATCH(i))); + } + } return 0; } @@ -2680,19 +2673,14 @@ static int i915_energy_uJ(struct seq_file *m, void *data) if (INTEL_GEN(dev_priv) < 6) return -ENODEV; - wakeref = intel_runtime_pm_get(dev_priv); - - if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) { - intel_runtime_pm_put(dev_priv, wakeref); + if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) return -ENODEV; - } units = (power & 0x1f00) >> 8; - power = I915_READ(MCH_SECP_NRG_STTS); - power = (1000000 * power) >> units; /* convert to uJ */ - - intel_runtime_pm_put(dev_priv, wakeref); + with_intel_runtime_pm(dev_priv, wakeref) + power = I915_READ(MCH_SECP_NRG_STTS); + power = (1000000 * power) >> units; /* convert to uJ */ seq_printf(m, "%llu", power); return 0; @@ -3275,22 +3263,20 @@ static ssize_t i915_ipc_status_write(struct file *file, const 
char __user *ubuf, struct seq_file *m = file->private_data; struct drm_i915_private *dev_priv = m->private; intel_wakeref_t wakeref; - int ret; bool enable; + int ret; ret = kstrtobool_from_user(ubuf, len, &enable); if (ret < 0) return ret; - wakeref = intel_runtime_pm_get(dev_priv); - - if (!dev_priv->ipc_enabled && enable) - DRM_INFO("Enabling IPC: WM will be proper only after next commit\n"); - dev_priv->wm.distrust_bios_wm = true; - dev_priv->ipc_enabled = enable; - intel_enable_ipc(dev_priv); - - intel_runtime_pm_put(dev_priv, wakeref); + with_intel_runtime_pm(dev_priv, wakeref) { + if (!dev_priv->ipc_enabled && enable) + DRM_INFO("Enabling IPC: WM will be proper only after next commit\n"); + dev_priv->wm.distrust_bios_wm = true; + dev_priv->ipc_enabled = enable; + intel_enable_ipc(dev_priv); + } return len; } @@ -4130,16 +4116,13 @@ i915_cache_sharing_get(void *data, u64 *val) { struct drm_i915_private *dev_priv = data; intel_wakeref_t wakeref; - u32 snpcr; + u32 snpcr = 0; if (!(IS_GEN_RANGE(dev_priv, 6, 7))) return -ENODEV; - wakeref = intel_runtime_pm_get(dev_priv); - - snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); - - intel_runtime_pm_put(dev_priv, wakeref); + with_intel_runtime_pm(dev_priv, wakeref) + snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; @@ -4151,7 +4134,6 @@ i915_cache_sharing_set(void *data, u64 val) { struct drm_i915_private *dev_priv = data; intel_wakeref_t wakeref; - u32 snpcr; if (!(IS_GEN_RANGE(dev_priv, 6, 7))) return -ENODEV; @@ -4159,16 +4141,17 @@ i915_cache_sharing_set(void *data, u64 val) if (val > 3) return -EINVAL; - wakeref = intel_runtime_pm_get(dev_priv); DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); + with_intel_runtime_pm(dev_priv, wakeref) { + u32 snpcr; + + /* Update the cache sharing policy here as well */ + snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); + snpcr &= ~GEN6_MBC_SNPCR_MASK; + snpcr |= val << GEN6_MBC_SNPCR_SHIFT; + I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); + } - /* Update the cache sharing policy here as well */ - snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); - snpcr &= ~GEN6_MBC_SNPCR_MASK; - snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); - I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); - - intel_runtime_pm_put(dev_priv, wakeref); return 0; } @@ -4405,20 +4388,17 @@ static int i915_sseu_status(struct seq_file *m, void *unused) sseu.max_eus_per_subslice = RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice; - wakeref = intel_runtime_pm_get(dev_priv); - - if (IS_CHERRYVIEW(dev_priv)) { - cherryview_sseu_device_status(dev_priv, &sseu); - } else if (IS_BROADWELL(dev_priv)) { - broadwell_sseu_device_status(dev_priv, &sseu); - } else if (IS_GEN(dev_priv, 9)) { - gen9_sseu_device_status(dev_priv, &sseu); - } else if (INTEL_GEN(dev_priv) >= 10) { - gen10_sseu_device_status(dev_priv, &sseu); + with_intel_runtime_pm(dev_priv, wakeref) { + if (IS_CHERRYVIEW(dev_priv)) + cherryview_sseu_device_status(dev_priv, &sseu); + else if (IS_BROADWELL(dev_priv)) + broadwell_sseu_device_status(dev_priv, &sseu); + else if (IS_GEN(dev_priv, 9)) + gen9_sseu_device_status(dev_priv, &sseu); + else if (INTEL_GEN(dev_priv) >= 10) + gen10_sseu_device_status(dev_priv, &sseu); } - intel_runtime_pm_put(dev_priv, wakeref); - i915_print_sseu_info(m, false, &sseu); return 0; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3186859ff378..f5e2456c4f73 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -813,13 +813,13 @@ void i915_gem_flush_ggtt_writes(struct 
drm_i915_private *dev_priv) i915_gem_chipset_flush(dev_priv); - wakeref = intel_runtime_pm_get(dev_priv); - spin_lock_irq(&dev_priv->uncore.lock); + with_intel_runtime_pm(dev_priv, wakeref) { + spin_lock_irq(&dev_priv->uncore.lock); - POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE)); + POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE)); - spin_unlock_irq(&dev_priv->uncore.lock); - intel_runtime_pm_put(dev_priv, wakeref); + spin_unlock_irq(&dev_priv->uncore.lock); + } } static void diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index e2c61633e95d..dbea14bf67cc 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2535,9 +2535,8 @@ static int ggtt_bind_vma(struct i915_vma *vma, if (i915_gem_object_is_readonly(obj)) pte_flags |= PTE_READ_ONLY; - wakeref = intel_runtime_pm_get(i915); - vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) + vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; @@ -2556,9 +2555,8 @@ static void ggtt_unbind_vma(struct i915_vma *vma) struct drm_i915_private *i915 = vma->vm->i915; intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(i915); - vma->vm->clear_range(vma->vm, vma->node.start, vma->size); - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) + vma->vm->clear_range(vma->vm, vma->node.start, vma->size); } static int aliasing_gtt_bind_vma(struct i915_vma *vma, @@ -2592,9 +2590,10 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, if (flags & I915_VMA_GLOBAL_BIND) { intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(i915); - vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) { + vma->vm->insert_entries(vma->vm, vma, + cache_level, pte_flags); + } } return 0; @@ -2605,11 +2604,11 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma) struct drm_i915_private *i915 = vma->vm->i915; if (vma->flags & I915_VMA_GLOBAL_BIND) { + struct i915_address_space *vm = vma->vm; intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(i915); - vma->vm->clear_range(vma->vm, vma->node.start, vma->size); - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) + vm->clear_range(vm, vma->node.start, vma->size); } if (vma->flags & I915_VMA_LOCAL_BIND) { diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index f01489b05b5e..8ceecb026910 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -296,14 +296,14 @@ i915_gem_shrink(struct drm_i915_private *i915, unsigned long i915_gem_shrink_all(struct drm_i915_private *i915) { intel_wakeref_t wakeref; - unsigned long freed; + unsigned long freed = 0; - wakeref = intel_runtime_pm_get(i915); - freed = i915_gem_shrink(i915, -1UL, NULL, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_ACTIVE); - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) { + freed = i915_gem_shrink(i915, -1UL, NULL, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND | + I915_SHRINK_ACTIVE); + } return freed; } @@ -376,14 +376,14 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) { intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(i915); - freed += i915_gem_shrink(i915, - sc->nr_to_scan 
- sc->nr_scanned, - &sc->nr_scanned, - I915_SHRINK_ACTIVE | - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND); - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) { + freed += i915_gem_shrink(i915, + sc->nr_to_scan - sc->nr_scanned, + &sc->nr_scanned, + I915_SHRINK_ACTIVE | + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND); + } } shrinker_unlock(i915, unlock); @@ -400,11 +400,11 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) unsigned long unevictable, bound, unbound, freed_pages; intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(i915); - freed_pages = i915_gem_shrink(i915, -1UL, NULL, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND); - intel_runtime_pm_put(i915, wakeref); + freed_pages = 0; + with_intel_runtime_pm(i915, wakeref) + freed_pages += i915_gem_shrink(i915, -1UL, NULL, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND); /* Because we may be allocating inside our own driver, we cannot * assert that there are no objects with pinned pages that are not @@ -454,12 +454,11 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr MAX_SCHEDULE_TIMEOUT)) goto out; - wakeref = intel_runtime_pm_get(i915); - freed_pages += i915_gem_shrink(i915, -1UL, NULL, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_VMAPS); - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) + freed_pages += i915_gem_shrink(i915, -1UL, NULL, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND | + I915_SHRINK_VMAPS); /* We also want to clear any cached iomaps as they wrap vmap */ list_for_each_entry_safe(vma, next, diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 3d43fc9dd25d..b1cb2d3cae16 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -230,14 +230,11 @@ frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) val = dev_priv->gt_pm.rps.cur_freq; if (dev_priv->gt.awake) { - intel_wakeref_t wakeref = - intel_runtime_pm_get_if_in_use(dev_priv); + intel_wakeref_t wakeref; - if (wakeref) { + with_intel_runtime_pm_if_in_use(dev_priv, wakeref) val = intel_get_cagf(dev_priv, I915_READ_NOTRACE(GEN6_RPSTAT1)); - intel_runtime_pm_put(dev_priv, wakeref); - } } add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT], diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 2cbbf165d179..41313005af42 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -43,11 +43,10 @@ static u32 calc_residency(struct drm_i915_private *dev_priv, i915_reg_t reg) { intel_wakeref_t wakeref; - u64 res; + u64 res = 0; - wakeref = intel_runtime_pm_get(dev_priv); - res = intel_rc6_residency_us(dev_priv, reg); - intel_runtime_pm_put(dev_priv, wakeref); + with_intel_runtime_pm(dev_priv, wakeref) + res = intel_rc6_residency_us(dev_priv, reg); return DIV_ROUND_CLOSEST_ULL(res, 1000); } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index a1e4e1033289..71377ec49a10 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -2187,6 +2187,14 @@ intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915); intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915); intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915); +#define with_intel_runtime_pm(i915, wf) \ + for ((wf) = intel_runtime_pm_get(i915); (wf); \ + intel_runtime_pm_put((i915), (wf)), (wf) = 0) + +#define 
with_intel_runtime_pm_if_in_use(i915, wf) \ + for ((wf) = intel_runtime_pm_get_if_in_use(i915); (wf); \ + intel_runtime_pm_put((i915), (wf)), (wf) = 0) + void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref); diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c index 20c0b36d748e..b53582c0c6c1 100644 --- a/drivers/gpu/drm/i915/intel_guc_log.c +++ b/drivers/gpu/drm/i915/intel_guc_log.c @@ -444,9 +444,8 @@ static void guc_log_capture_logs(struct intel_guc_log *log) * Generally device is expected to be active only at this * time, so get/put should be really quick. */ - wakeref = intel_runtime_pm_get(dev_priv); - guc_action_flush_log_complete(guc); - intel_runtime_pm_put(dev_priv, wakeref); + with_intel_runtime_pm(dev_priv, wakeref) + guc_action_flush_log_complete(guc); } int intel_guc_log_create(struct intel_guc_log *log) @@ -507,7 +506,7 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) struct intel_guc *guc = log_to_guc(log); struct drm_i915_private *dev_priv = guc_to_i915(guc); intel_wakeref_t wakeref; - int ret; + int ret = 0; BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0); GEM_BUG_ON(!log->vma); @@ -521,16 +520,14 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) mutex_lock(&dev_priv->drm.struct_mutex); - if (log->level == level) { - ret = 0; + if (log->level == level) goto out_unlock; - } - wakeref = intel_runtime_pm_get(dev_priv); - ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(level), - GUC_LOG_LEVEL_IS_ENABLED(level), - GUC_LOG_LEVEL_TO_VERBOSITY(level)); - intel_runtime_pm_put(dev_priv, wakeref); + with_intel_runtime_pm(dev_priv, wakeref) + ret = guc_action_control_log(guc, + GUC_LOG_LEVEL_IS_VERBOSE(level), + GUC_LOG_LEVEL_IS_ENABLED(level), + GUC_LOG_LEVEL_TO_VERBOSITY(level)); if (ret) { DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret); goto out_unlock; @@ -611,9 +608,8 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log) */ flush_work(&log->relay.flush_work); - wakeref = intel_runtime_pm_get(i915); - guc_action_flush_log(guc); - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) + guc_action_flush_log(guc); /* GuC would have updated log buffer by now, so capture it */ guc_log_capture_logs(log); diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c index 3e8c18b6a42d..9bd1c9002c2a 100644 --- a/drivers/gpu/drm/i915/intel_huc.c +++ b/drivers/gpu/drm/i915/intel_huc.c @@ -116,14 +116,13 @@ int intel_huc_check_status(struct intel_huc *huc) { struct drm_i915_private *dev_priv = huc_to_i915(huc); intel_wakeref_t wakeref; - bool status; + bool status = false; if (!HAS_HUC(dev_priv)) return -ENODEV; - wakeref = intel_runtime_pm_get(dev_priv); - status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED; - intel_runtime_pm_put(dev_priv, wakeref); + with_intel_runtime_pm(dev_priv, wakeref) + status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED; return status; } diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 93a2e4b5c54c..5a39a6347a7a 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -1204,17 +1204,19 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd) struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); intel_wakeref_t wakeref; - u32 hw_level; - int ret; + 
int ret = 0; - wakeref = intel_runtime_pm_get(dev_priv); - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + with_intel_runtime_pm(dev_priv, wakeref) { + u32 hw_level; - hw_level = intel_panel_get_backlight(connector); - ret = scale_hw_to_user(connector, hw_level, bd->props.max_brightness); + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - drm_modeset_unlock(&dev->mode_config.connection_mutex); - intel_runtime_pm_put(dev_priv, wakeref); + hw_level = intel_panel_get_backlight(connector); + ret = scale_hw_to_user(connector, + hw_level, bd->props.max_brightness); + + drm_modeset_unlock(&dev->mode_config.connection_mutex); + } return ret; } diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index d494d92da02c..681ea532585e 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1696,21 +1696,21 @@ int i915_reg_read_ioctl(struct drm_device *dev, flags = reg->offset & (entry->size - 1); - wakeref = intel_runtime_pm_get(dev_priv); - if (entry->size == 8 && flags == I915_REG_READ_8B_WA) - reg->val = I915_READ64_2x32(entry->offset_ldw, - entry->offset_udw); - else if (entry->size == 8 && flags == 0) - reg->val = I915_READ64(entry->offset_ldw); - else if (entry->size == 4 && flags == 0) - reg->val = I915_READ(entry->offset_ldw); - else if (entry->size == 2 && flags == 0) - reg->val = I915_READ16(entry->offset_ldw); - else if (entry->size == 1 && flags == 0) - reg->val = I915_READ8(entry->offset_ldw); - else - ret = -EINVAL; - intel_runtime_pm_put(dev_priv, wakeref); + with_intel_runtime_pm(dev_priv, wakeref) { + if (entry->size == 8 && flags == I915_REG_READ_8B_WA) + reg->val = I915_READ64_2x32(entry->offset_ldw, + entry->offset_udw); + else if (entry->size == 8 && flags == 0) + reg->val = I915_READ64(entry->offset_ldw); + else if (entry->size == 4 && flags == 0) + reg->val = I915_READ(entry->offset_ldw); + else if (entry->size == 2 && flags == 0) + reg->val = I915_READ16(entry->offset_ldw); + else if (entry->size == 1 && flags == 0) + reg->val = I915_READ8(entry->offset_ldw); + else + ret = -EINVAL; + } return ret; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index 01a46c46fe25..e77b7ed449ae 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -98,26 +98,22 @@ static void pm_suspend(struct drm_i915_private *i915) { intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(i915); - - i915_gem_suspend_gtt_mappings(i915); - i915_gem_suspend_late(i915); - - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) { + i915_gem_suspend_gtt_mappings(i915); + i915_gem_suspend_late(i915); + } } static void pm_hibernate(struct drm_i915_private *i915) { intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(i915); - - i915_gem_suspend_gtt_mappings(i915); - - i915_gem_freeze(i915); - i915_gem_freeze_late(i915); + with_intel_runtime_pm(i915, wakeref) { + i915_gem_suspend_gtt_mappings(i915); - intel_runtime_pm_put(i915, wakeref); + i915_gem_freeze(i915); + i915_gem_freeze_late(i915); + } } static void pm_resume(struct drm_i915_private *i915) @@ -128,13 +124,11 @@ static void pm_resume(struct drm_i915_private *i915) * Both suspend and hibernate follow the same wakeup path and assume * that runtime-pm just works. 
*/ - wakeref = intel_runtime_pm_get(i915); - - intel_engines_sanitize(i915, false); - i915_gem_sanitize(i915); - i915_gem_resume(i915); - - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) { + intel_engines_sanitize(i915, false); + i915_gem_sanitize(i915); + i915_gem_resume(i915); + } } static int igt_gem_suspend(void *arg) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c index 7a9b1f20b019..4cba50679607 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c @@ -610,9 +610,9 @@ static int igt_ctx_exec(void *arg) } } - wakeref = intel_runtime_pm_get(i915); - err = gpu_fill(obj, ctx, engine, dw); - intel_runtime_pm_put(i915, wakeref); + err = 0; + with_intel_runtime_pm(i915, wakeref) + err = gpu_fill(obj, ctx, engine, dw); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", ndwords, dw, max_dwords(obj), @@ -718,9 +718,9 @@ static int igt_ctx_readonly(void *arg) i915_gem_object_set_readonly(obj); } - wakeref = intel_runtime_pm_get(i915); - err = gpu_fill(obj, ctx, engine, dw); - intel_runtime_pm_put(i915, wakeref); + err = 0; + with_intel_runtime_pm(i915, wakeref) + err = gpu_fill(obj, ctx, engine, dw); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", ndwords, dw, max_dwords(obj), diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index bbcbf11c72b3..067e5dfa0a24 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -484,18 +484,16 @@ int i915_gem_evict_mock_selftests(void) }; struct drm_i915_private *i915; intel_wakeref_t wakeref; - int err; + int err = 0; i915 = mock_gem_device(); if (!i915) return -ENOMEM; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + with_intel_runtime_pm(i915, wakeref) + err = i915_subtests(tests, i915); - err = i915_subtests(tests, i915); - - intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); drm_dev_put(&i915->drm); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c index 3575e1387c3f..395ae878e0f7 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c @@ -509,9 +509,8 @@ static void disable_retire_worker(struct drm_i915_private *i915) if (!i915->gt.active_requests++) { intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(i915); - i915_gem_unpark(i915); - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) + i915_gem_unpark(i915); } mutex_unlock(&i915->drm.struct_mutex); @@ -593,10 +592,10 @@ static int igt_mmap_offset_exhaustion(void *arg) goto out; } + err = 0; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); - err = make_obj_busy(obj); - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) + err = make_obj_busy(obj); mutex_unlock(&i915->drm.struct_mutex); if (err) { pr_err("[loop %d] Failed to busy the object\n", loop); diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 9f705ff9423f..2e14d6d3bad7 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -256,17 +256,15 @@ int 
i915_request_mock_selftests(void) }; struct drm_i915_private *i915; intel_wakeref_t wakeref; - int err; + int err = 0; i915 = mock_gem_device(); if (!i915) return -ENOMEM; - wakeref = intel_runtime_pm_get(i915); + with_intel_runtime_pm(i915, wakeref) + err = i915_subtests(tests, i915); - err = i915_subtests(tests, i915); - - intel_runtime_pm_put(i915, wakeref); drm_dev_put(&i915->drm); return err; diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c index 75324b6249e3..9009d7b8b136 100644 --- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c +++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c @@ -93,9 +93,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) if (err) goto err_obj; - wakeref = intel_runtime_pm_get(engine->i915); - rq = i915_request_alloc(engine, ctx); - intel_runtime_pm_put(engine->i915, wakeref); + rq = ERR_PTR(-ENODEV); + with_intel_runtime_pm(engine->i915, wakeref) + rq = i915_request_alloc(engine, ctx); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_pin; @@ -236,14 +236,15 @@ switch_to_scratch_context(struct intel_engine_cs *engine, if (IS_ERR(ctx)) return PTR_ERR(ctx); - wakeref = intel_runtime_pm_get(engine->i915); - - if (spin) - rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP); - else - rq = i915_request_alloc(engine, ctx); - - intel_runtime_pm_put(engine->i915, wakeref); + rq = ERR_PTR(-ENODEV); + with_intel_runtime_pm(engine->i915, wakeref) { + if (spin) + rq = igt_spinner_create_request(spin, + ctx, engine, + MI_NOOP); + else + rq = i915_request_alloc(engine, ctx); + } kernel_context_close(ctx); @@ -301,9 +302,8 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine, if (err) goto out; - wakeref = intel_runtime_pm_get(i915); - err = reset(engine); - intel_runtime_pm_put(i915, wakeref); + with_intel_runtime_pm(i915, wakeref) + err = reset(engine); if (want_spin) { igt_spinner_end(&spin); -- cgit v1.2.3
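
For readers skimming the conversions above, the pattern applied at each call site is roughly the following minimal sketch (do_work() is a hypothetical placeholder, not a function touched by this patch):

	/* Before: explicit get/put bracketing a small block. */
	intel_wakeref_t wakeref;
	int err;

	wakeref = intel_runtime_pm_get(i915);
	err = do_work(i915);
	intel_runtime_pm_put(i915, wakeref);

	/* After: the wakeref is scoped to the block by the macro. */
	intel_wakeref_t wakeref;
	int err = 0; /* pre-initialised; the body only runs while the wakeref is held */

	with_intel_runtime_pm(i915, wakeref)
		err = do_work(i915);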
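
The macro itself is a single-pass for-loop: acquire the wakeref, run the attached statement or block while the wakeref is non-zero, then release it and zero the loop variable so the loop exits. Hand-expanding the _if_in_use variant (read_hw() and val are hypothetical stand-ins) shows why several converted call sites pre-initialise their results (err = 0, freed = 0, gpu = NULL, rq = ERR_PTR(-ENODEV)): if the device is not already awake, intel_runtime_pm_get_if_in_use() returns 0 and the body never runs.

	/*
	 * with_intel_runtime_pm_if_in_use(i915, wakeref)
	 *	val = read_hw(i915);
	 * expands to approximately:
	 */
	for (wakeref = intel_runtime_pm_get_if_in_use(i915);
	     wakeref;
	     intel_runtime_pm_put(i915, wakeref), wakeref = 0)
		val = read_hw(i915); /* skipped entirely if the device was suspended */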