Diffstat (limited to 'drivers/gpu/drm/msm/adreno/a6xx_gmu.c')
-rw-r--r--	drivers/gpu/drm/msm/adreno/a6xx_gmu.c	107
1 file changed, 58 insertions, 49 deletions
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 21e77d67151f..b67b38c8fadf 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -103,17 +103,45 @@ bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}
-static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
+void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
{
- struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
- struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
- struct msm_gpu *gpu = &adreno_gpu->base;
- int ret;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ u32 perf_index;
+ unsigned long gpu_freq;
+ int ret = 0;
+
+ gpu_freq = dev_pm_opp_get_freq(opp);
+
+ if (gpu_freq == gmu->freq)
+ return;
+
+ for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
+ if (gpu_freq == gmu->gpu_freqs[perf_index])
+ break;
+
+ gmu->current_perf_index = perf_index;
+ gmu->freq = gmu->gpu_freqs[perf_index];
+
+ /*
+ * This can get called from devfreq while the hardware is idle. Don't
+ * bring up the power if it isn't already active
+ */
+ if (pm_runtime_get_if_in_use(gmu->dev) == 0)
+ return;
+
+ if (!gmu->legacy) {
+ a6xx_hfi_set_freq(gmu, perf_index);
+ icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
+ pm_runtime_put(gmu->dev);
+ return;
+ }
gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
- ((3 & 0xf) << 28) | index);
+ ((3 & 0xf) << 28) | perf_index);
/*
* Send an invalid index as a vote for the bus bandwidth and let the
@@ -134,37 +162,6 @@ static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
* for now leave it at max so that the performance is nominal.
*/
icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
-}
-
-void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
-{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
- u32 perf_index = 0;
-
- if (freq == gmu->freq)
- return;
-
- for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
- if (freq == gmu->gpu_freqs[perf_index])
- break;
-
- gmu->current_perf_index = perf_index;
- gmu->freq = gmu->gpu_freqs[perf_index];
-
- /*
- * This can get called from devfreq while the hardware is idle. Don't
- * bring up the power if it isn't already active
- */
- if (pm_runtime_get_if_in_use(gmu->dev) == 0)
- return;
-
- if (gmu->legacy)
- __a6xx_gmu_set_freq(gmu, perf_index);
- else
- a6xx_hfi_set_freq(gmu, perf_index);
-
pm_runtime_put(gmu->dev);
}
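
The reworked a6xx_gmu_set_freq() above takes a dev_pm_opp rather than a raw frequency, so a caller is now expected to resolve the OPP first and drop its reference afterwards. A minimal caller sketch, with the helper name example_set_gpu_freq() invented purely for illustration:

	/* Illustrative only: translate a raw frequency request into an OPP
	 * before handing it to the new a6xx_gmu_set_freq() interface. */
	static int example_set_gpu_freq(struct msm_gpu *gpu, unsigned long freq)
	{
		struct dev_pm_opp *opp;

		/* dev_pm_opp_find_freq_ceil() rounds up to the nearest defined
		 * OPP and takes a reference that must be released with
		 * dev_pm_opp_put(). */
		opp = dev_pm_opp_find_freq_ceil(&gpu->pdev->dev, &freq);
		if (IS_ERR(opp))
			return PTR_ERR(opp);

		a6xx_gmu_set_freq(gpu, opp);
		dev_pm_opp_put(opp);

		return 0;
	}
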
@@ -839,6 +836,19 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
a6xx_gmu_rpmh_off(gmu);
}
+static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
+{
+ struct dev_pm_opp *gpu_opp;
+ unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
+
+ gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
+ if (IS_ERR_OR_NULL(gpu_opp))
+ return;
+
+ a6xx_gmu_set_freq(gpu, gpu_opp);
+ dev_pm_opp_put(gpu_opp);
+}
+
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
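
A note on the a6xx_gmu_set_initial_freq() helper added above: dev_pm_opp_find_freq_exact() takes a reference on the OPP it returns, which is why the helper pairs the lookup with dev_pm_opp_put() once a6xx_gmu_set_freq() has consumed it; if no exact match exists, the helper simply returns and leaves the GPU clock untouched.
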
@@ -854,10 +864,19 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
/* Turn on the resources */
pm_runtime_get_sync(gmu->dev);
+ /*
+ * "enable" the GX power domain which won't actually do anything but it
+ * will make sure that the refcounting is correct in case we need to
+ * bring down the GX after a GMU failure
+ */
+ if (!IS_ERR_OR_NULL(gmu->gxpd))
+ pm_runtime_get_sync(gmu->gxpd);
+
/* Use a known rate to bring up the GMU */
clk_set_rate(gmu->core_clk, 200000000);
ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
if (ret) {
+ pm_runtime_put(gmu->gxpd);
pm_runtime_put(gmu->dev);
return ret;
}
@@ -898,24 +917,14 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
enable_irq(gmu->hfi_irq);
/* Set the GPU to the current freq */
- if (gmu->legacy)
- __a6xx_gmu_set_freq(gmu, gmu->current_perf_index);
- else
- a6xx_hfi_set_freq(gmu, gmu->current_perf_index);
-
- /*
- * "enable" the GX power domain which won't actually do anything but it
- * will make sure that the refcounting is correct in case we need to
- * bring down the GX after a GMU failure
- */
- if (!IS_ERR_OR_NULL(gmu->gxpd))
- pm_runtime_get(gmu->gxpd);
+ a6xx_gmu_set_initial_freq(gpu, gmu);
out:
/* On failure, shut down the GMU to leave it in a good state */
if (ret) {
disable_irq(gmu->gmu_irq);
a6xx_rpmh_stop(gmu);
+ pm_runtime_put(gmu->gxpd);
pm_runtime_put(gmu->dev);
}