author    Rob Clark <robdclark@chromium.org>    2019-08-20 16:12:28 -0700
committer Rob Clark <robdclark@chromium.org>    2019-09-03 16:16:58 -0700
commit    241b507c166fef3e461e5daf562d8e41aa41bf15 (patch)
tree      d131a04ee9fc4b6d7db374fa89a351e8e1f9d894 /drivers/gpu/drm
parent    ed98359a9e3947141c31a51e8bb7752da24ae921 (diff)
drm/msm/dpu: fix "frame done" timeouts
Previously, dpu_crtc_frame_event_work() would try to acquire all the modeset locks in order to check whether it can release bandwidth. (If we only have a cmd-mode display, bandwidth can be released at frame-done time.)

The problem with this is that it is also responsible for signalling frame_done_comp, which dpu_crtc_commit_kickoff() waits on if there is already a frame pending. This is called in the msm_atomic_commit_tail() path, which means that for non-nonblock commits, at least some of the modeset locks are already held.

Re-work this scheme to use a reference count to track our need to have clocks enabled. It is incremented for each atomic commit, and decremented in the corresponding frame-done. Additionally, any crtc used in video mode holds an extra reference while it is enabled. The net effect is that we can determine in frame-done whether it is safe to drop bandwidth without needing to acquire any modeset locks.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Sean Paul <sean@chromium.org>
Signed-off-by: Rob Clark <robdclark@chromium.org>
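For illustration only, a minimal sketch of the reference-counting lifecycle the message describes; the standalone bandwidth_ref counter and the get/put helpers below are hypothetical stand-ins, not the driver's actual functions (those are in the diff that follows).

#include <linux/atomic.h>

static atomic_t bandwidth_ref = ATOMIC_INIT(0);

/* Each atomic commit (and each enabled video-mode crtc) takes a reference. */
static void bandwidth_get(void)
{
	atomic_inc(&bandwidth_ref);
}

/*
 * Frame-done (or video-mode crtc disable) drops a reference; bandwidth is
 * released only when the last reference goes away, so no modeset locks are
 * needed to make that decision.
 */
static void bandwidth_put(void)
{
	if (atomic_dec_return(&bandwidth_ref) > 0)
		return;
	/* last reference dropped: safe to release bandwidth here */
}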
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c | 16
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c      | 45
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c       |  2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h       |  8
4 files changed, 38 insertions(+), 33 deletions(-)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 5cda96875e03..09a49b59bb5b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -214,7 +214,6 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
*/
void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
{
- struct drm_crtc *tmp_crtc;
struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *dpu_cstate;
struct dpu_kms *kms;
@@ -233,22 +232,9 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
dpu_crtc = to_dpu_crtc(crtc);
dpu_cstate = to_dpu_crtc_state(crtc->state);
- /* only do this for command mode rt client */
- if (dpu_crtc_get_intf_mode(crtc) != INTF_MODE_CMD)
+ if (atomic_dec_return(&kms->bandwidth_ref) > 0)
return;
- /*
- * If video interface present, cmd panel bandwidth cannot be
- * released.
- */
- if (dpu_crtc_get_intf_mode(crtc) == INTF_MODE_CMD)
- drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if (tmp_crtc->enabled &&
- dpu_crtc_get_intf_mode(tmp_crtc) ==
- INTF_MODE_VIDEO)
- return;
- }
-
/* Release the bandwidth */
if (kms->perf.enable_bw_release) {
trace_dpu_cmd_release_bw(crtc->base.id);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index ff71e2c60389..68d27a1bb2a4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -294,19 +294,6 @@ void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
trace_dpu_crtc_vblank_cb(DRMID(crtc));
}
-static void dpu_crtc_release_bw_unlocked(struct drm_crtc *crtc)
-{
- int ret = 0;
- struct drm_modeset_acquire_ctx ctx;
-
- DRM_MODESET_LOCK_ALL_BEGIN(crtc->dev, ctx, 0, ret);
- dpu_core_perf_crtc_release_bw(crtc);
- DRM_MODESET_LOCK_ALL_END(ctx, ret);
- if (ret)
- DRM_ERROR("Failed to acquire modeset locks to release bw, %d\n",
- ret);
-}
-
static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
struct dpu_crtc_frame_event *fevent = container_of(work,
@@ -336,7 +323,7 @@ static void dpu_crtc_frame_event_work(struct kthread_work *work)
/* release bandwidth and other resources */
trace_dpu_crtc_frame_event_done(DRMID(crtc),
fevent->event);
- dpu_crtc_release_bw_unlocked(crtc);
+ dpu_core_perf_crtc_release_bw(crtc);
} else {
trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
fevent->event);
@@ -652,7 +639,7 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async)
dpu_encoder_prepare_for_kickoff(encoder, async);
if (!async) {
- /* wait for frame_event_done completion */
+ /* wait for previous frame_event_done completion */
DPU_ATRACE_BEGIN("wait_for_frame_done_event");
ret = _dpu_crtc_wait_for_frame_done(crtc);
DPU_ATRACE_END("wait_for_frame_done_event");
@@ -731,6 +718,7 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
struct drm_encoder *encoder;
struct msm_drm_private *priv;
unsigned long flags;
+ bool release_bandwidth = false;
if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
DPU_ERROR("invalid crtc\n");
@@ -747,8 +735,15 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
drm_for_each_encoder_mask(encoder, crtc->dev,
- old_crtc_state->encoder_mask)
+ old_crtc_state->encoder_mask) {
+ /* in video mode, we hold an extra bandwidth reference
+ * as we cannot drop bandwidth at frame-done if any
+ * crtc is being used in video mode.
+ */
+ if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
+ release_bandwidth = true;
dpu_encoder_assign_crtc(encoder, NULL);
+ }
/* wait for frame_event_done completion */
if (_dpu_crtc_wait_for_frame_done(crtc))
@@ -762,7 +757,8 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
if (atomic_read(&dpu_crtc->frame_pending)) {
trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
atomic_read(&dpu_crtc->frame_pending));
- dpu_core_perf_crtc_release_bw(crtc);
+ if (release_bandwidth)
+ dpu_core_perf_crtc_release_bw(crtc);
atomic_set(&dpu_crtc->frame_pending, 0);
}
@@ -794,6 +790,7 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
struct dpu_crtc *dpu_crtc;
struct drm_encoder *encoder;
struct msm_drm_private *priv;
+ bool request_bandwidth;
if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
DPU_ERROR("invalid crtc\n");
@@ -806,9 +803,19 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
dpu_crtc = to_dpu_crtc(crtc);
- drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
+ /* in video mode, we hold an extra bandwidth reference
+ * as we cannot drop bandwidth at frame-done if any
+ * crtc is being used in video mode.
+ */
+ if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
+ request_bandwidth = true;
dpu_encoder_register_frame_event_callback(encoder,
dpu_crtc_frame_event_cb, (void *)crtc);
+ }
+
+ if (request_bandwidth)
+ atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);
trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
dpu_crtc->enabled = true;
@@ -983,6 +990,8 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
}
}
+ atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);
+
rc = dpu_core_perf_crtc_check(crtc, state);
if (rc) {
DPU_ERROR("crtc%d failed performance check %d\n",
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index fb635c027c2a..9520c4578ac6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -802,6 +802,8 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
return rc;
}
+ atomic_set(&dpu_kms->bandwidth_ref, 0);
+
dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
if (IS_ERR(dpu_kms->mmio)) {
rc = PTR_ERR(dpu_kms->mmio);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 44f1635be257..4c889aabdaf9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -122,6 +122,14 @@ struct dpu_kms {
struct platform_device *pdev;
bool rpm_enabled;
struct dss_module_power mp;
+
+ /* reference count bandwidth requests, so we know when we can
+ * release bandwidth. Each atomic update increments, and frame-
+ * done event decrements. Additionally, for video mode, the
+ * reference is incremented when crtc is enabled, and decremented
+ * when disabled.
+ */
+ atomic_t bandwidth_ref;
};
struct vsync_info {