From 8724d5720900d10935a01237832b9670c5c531a4 Mon Sep 17 00:00:00 2001 From: Drew Davenport Date: Wed, 19 Feb 2020 10:42:24 -0700 Subject: drm/msm/dpu: Remove unused function arguments Several functions arguments in the resource manager are unused, so remove them. Signed-off-by: Drew Davenport Reviewed-by: Stephen Boyd Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 37 +++++++++++++--------------------- 1 file changed, 14 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index 23f5b1433b35..dea1dba441fe 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -144,8 +144,7 @@ static int _dpu_rm_hw_blk_create( const struct dpu_mdss_cfg *cat, void __iomem *mmio, enum dpu_hw_blk_type type, - uint32_t id, - const void *hw_catalog_info) + uint32_t id) { struct dpu_rm_hw_blk *blk; void *hw; @@ -223,7 +222,7 @@ int dpu_rm_init(struct dpu_rm *rm, } rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM, - cat->mixer[i].id, &cat->mixer[i]); + cat->mixer[i].id); if (rc) { DPU_ERROR("failed: lm hw not available\n"); goto fail; @@ -244,7 +243,7 @@ int dpu_rm_init(struct dpu_rm *rm, for (i = 0; i < cat->pingpong_count; i++) { rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG, - cat->pingpong[i].id, &cat->pingpong[i]); + cat->pingpong[i].id); if (rc) { DPU_ERROR("failed: pp hw not available\n"); goto fail; @@ -258,7 +257,7 @@ int dpu_rm_init(struct dpu_rm *rm, } rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF, - cat->intf[i].id, &cat->intf[i]); + cat->intf[i].id); if (rc) { DPU_ERROR("failed: intf hw not available\n"); goto fail; @@ -267,7 +266,7 @@ int dpu_rm_init(struct dpu_rm *rm, for (i = 0; i < cat->ctl_count; i++) { rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL, - cat->ctl[i].id, &cat->ctl[i]); + cat->ctl[i].id); if (rc) { DPU_ERROR("failed: ctl hw not available\n"); goto fail; @@ -293,7 +292,6 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top) * pingpong * @rm: dpu resource manager handle * @enc_id: encoder id requesting for allocation - * @reqs: proposed use case requirements * @lm: proposed layer mixer, function checks if lm, and all other hardwired * blocks connected to the lm (pp) is available and appropriate * @pp: output parameter, pingpong block attached to the layer mixer. 
@@ -305,7 +303,6 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top) static bool _dpu_rm_check_lm_and_get_connected_blks( struct dpu_rm *rm, uint32_t enc_id, - struct dpu_rm_requirements *reqs, struct dpu_rm_hw_blk *lm, struct dpu_rm_hw_blk **pp, struct dpu_rm_hw_blk *primary_lm) @@ -384,7 +381,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id, lm[lm_count] = iter_i.blk; if (!_dpu_rm_check_lm_and_get_connected_blks( - rm, enc_id, reqs, lm[lm_count], + rm, enc_id, lm[lm_count], &pp[lm_count], NULL)) continue; @@ -399,7 +396,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id, continue; if (!_dpu_rm_check_lm_and_get_connected_blks( - rm, enc_id, reqs, iter_j.blk, + rm, enc_id, iter_j.blk, &pp[lm_count], iter_i.blk)) continue; @@ -480,20 +477,19 @@ static int _dpu_rm_reserve_ctls( static int _dpu_rm_reserve_intf( struct dpu_rm *rm, uint32_t enc_id, - uint32_t id, - enum dpu_hw_blk_type type) + uint32_t id) { struct dpu_rm_hw_iter iter; int ret = 0; /* Find the block entry in the rm, and note the reservation */ - dpu_rm_init_hw_iter(&iter, 0, type); + dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_INTF); while (_dpu_rm_get_hw_locked(rm, &iter)) { if (iter.blk->id != id) continue; if (RESERVED_BY_OTHER(iter.blk, enc_id)) { - DPU_ERROR("type %d id %d already reserved\n", type, id); + DPU_ERROR("intf id %d already reserved\n", id); return -ENAVAIL; } @@ -504,7 +500,7 @@ static int _dpu_rm_reserve_intf( /* Shouldn't happen since intfs are fixed at probe */ if (!iter.hw) { - DPU_ERROR("couldn't find type %d id %d\n", type, id); + DPU_ERROR("couldn't find intf id %d\n", id); return -EINVAL; } @@ -523,8 +519,7 @@ static int _dpu_rm_reserve_intf_related_hw( if (hw_res->intfs[i] == INTF_MODE_NONE) continue; id = i + INTF_0; - ret = _dpu_rm_reserve_intf(rm, enc_id, id, - DPU_HW_BLK_INTF); + ret = _dpu_rm_reserve_intf(rm, enc_id, id); if (ret) return ret; } @@ -535,7 +530,6 @@ static int _dpu_rm_reserve_intf_related_hw( static int _dpu_rm_make_reservation( struct dpu_rm *rm, struct drm_encoder *enc, - struct drm_crtc_state *crtc_state, struct dpu_rm_requirements *reqs) { int ret; @@ -560,9 +554,7 @@ static int _dpu_rm_make_reservation( } static int _dpu_rm_populate_requirements( - struct dpu_rm *rm, struct drm_encoder *enc, - struct drm_crtc_state *crtc_state, struct dpu_rm_requirements *reqs, struct msm_display_topology req_topology) { @@ -621,14 +613,13 @@ int dpu_rm_reserve( mutex_lock(&rm->rm_lock); - ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs, - topology); + ret = _dpu_rm_populate_requirements(enc, &reqs, topology); if (ret) { DPU_ERROR("failed to populate hw requirements\n"); goto end; } - ret = _dpu_rm_make_reservation(rm, enc, crtc_state, &reqs); + ret = _dpu_rm_make_reservation(rm, enc, &reqs); if (ret) { DPU_ERROR("failed to reserve hw resources: %d\n", ret); _dpu_rm_release_reservation(rm, enc->base.id); -- cgit v1.2.3 From b954fa6baaca7ac171224ae4bcbd7c0e54016cd5 Mon Sep 17 00:00:00 2001 From: Drew Davenport Date: Wed, 19 Feb 2020 10:42:25 -0700 Subject: drm/msm/dpu: Refactor rm iterator Make iterator implementation private, and add function to query resources assigned to an encoder. 
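As an illustration (not part of the patch itself), a caller that used to walk
the iterator can now ask for every block of one type assigned to its encoder
in a single call. The sketch below mirrors the dpu_encoder_virt_mode_set()
hunk further down and assumes the usual driver context (dpu_kms, drm_enc,
MAX_CHANNELS_PER_ENC); error handling is omitted:

	/* Hedged sketch only: identifiers are the driver's, context assumed. */
	struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
	int num_pp;

	/*
	 * Old style: dpu_rm_init_hw_iter() plus repeated dpu_rm_get_hw()
	 * calls. New style: one call fills hw_pp[] with every pingpong
	 * block the RM has assigned to this encoder id, returning how
	 * many were found (at most ARRAY_SIZE(hw_pp)).
	 */
	num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, drm_enc->base.id,
					       DPU_HW_BLK_PINGPONG,
					       hw_pp, ARRAY_SIZE(hw_pp));
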
Signed-off-by: Drew Davenport Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 59 ++++++++++--------------- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h | 10 +++++ drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h | 10 +++++ drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 31 ++++++++++++- drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | 47 ++------------------ 5 files changed, 76 insertions(+), 81 deletions(-) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index f8ac3bf60fd6..6cadeff456f0 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -957,11 +957,11 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, struct drm_connector *conn = NULL, *conn_iter; struct drm_crtc *drm_crtc; struct dpu_crtc_state *cstate; - struct dpu_rm_hw_iter hw_iter; struct msm_display_topology topology; - struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL }; - struct dpu_hw_mixer *hw_lm[MAX_CHANNELS_PER_ENC] = { NULL }; - int num_lm = 0, num_ctl = 0; + struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC]; + struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC]; + struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC]; + int num_lm, num_ctl, num_pp; int i, j, ret; if (!drm_enc) { @@ -1005,42 +1005,31 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, return; } - dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG); - for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { - dpu_enc->hw_pp[i] = NULL; - if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter)) - break; - dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) hw_iter.hw; - } - - dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_CTL); - for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { - if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter)) - break; - hw_ctl[i] = (struct dpu_hw_ctl *)hw_iter.hw; - num_ctl++; - } + num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, drm_enc->base.id, + DPU_HW_BLK_PINGPONG, hw_pp, ARRAY_SIZE(hw_pp)); + num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, drm_enc->base.id, + DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl)); + num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, drm_enc->base.id, + DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); - dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_LM); - for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { - if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter)) - break; - hw_lm[i] = (struct dpu_hw_mixer *)hw_iter.hw; - num_lm++; - } + for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) + dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i]) + : NULL; cstate = to_dpu_crtc_state(drm_crtc->state); for (i = 0; i < num_lm; i++) { int ctl_idx = (i < num_ctl) ? 
i : (num_ctl-1); - cstate->mixers[i].hw_lm = hw_lm[i]; - cstate->mixers[i].lm_ctl = hw_ctl[ctl_idx]; + cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]); + cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]); } cstate->num_mixers = num_lm; for (i = 0; i < dpu_enc->num_phys_encs; i++) { + int num_blk; + struct dpu_hw_blk *hw_blk[MAX_CHANNELS_PER_ENC]; struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; if (!dpu_enc->hw_pp[i]) { @@ -1056,17 +1045,15 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, } phys->hw_pp = dpu_enc->hw_pp[i]; - phys->hw_ctl = hw_ctl[i]; + phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]); - dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, - DPU_HW_BLK_INTF); - for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) { + num_blk = dpu_rm_get_assigned_resources(&dpu_kms->rm, + drm_enc->base.id, DPU_HW_BLK_INTF, hw_blk, + ARRAY_SIZE(hw_blk)); + for (j = 0; j < num_blk; j++) { struct dpu_hw_intf *hw_intf; - if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter)) - break; - - hw_intf = (struct dpu_hw_intf *)hw_iter.hw; + hw_intf = to_dpu_hw_intf(hw_blk[i]); if (hw_intf->idx == phys->intf_idx) phys->hw_intf = hw_intf; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h index 85468981632d..0ead64d3f63d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h @@ -89,6 +89,16 @@ struct dpu_hw_intf { struct dpu_hw_intf_ops ops; }; +/** + * to_dpu_hw_intf - convert base object dpu_hw_base to container + * @hw: Pointer to base hardware block + * return: Pointer to hardware block container + */ +static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw) +{ + return container_of(hw, struct dpu_hw_intf, base); +} + /** * dpu_hw_intf_init(): Initializes the intf driver for the passed * interface idx. diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h index 3d6f46b1db30..d73cb73e938b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h @@ -96,6 +96,16 @@ struct dpu_hw_pingpong { struct dpu_hw_pingpong_ops ops; }; +/** + * to_dpu_hw_pingpong - convert base object dpu_hw_base to container + * @hw: Pointer to base hardware block + * return: Pointer to hardware block container + */ +static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw) +{ + return container_of(hw, struct dpu_hw_pingpong, base); +} + /** * dpu_hw_pingpong_init - initializes the pingpong driver for the passed * pingpong idx. diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index dea1dba441fe..779df26dc81a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -40,7 +40,21 @@ struct dpu_rm_hw_blk { struct dpu_hw_blk *hw; }; -void dpu_rm_init_hw_iter( +/** + * struct dpu_rm_hw_iter - iterator for use with dpu_rm + * @hw: dpu_hw object requested, or NULL on failure + * @blk: dpu_rm internal block representation. Clients ignore. Used as iterator. + * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder + * @type: Hardware Block Type client wishes to search for. 
+ */ +struct dpu_rm_hw_iter { + void *hw; + struct dpu_rm_hw_blk *blk; + uint32_t enc_id; + enum dpu_hw_blk_type type; +}; + +static void dpu_rm_init_hw_iter( struct dpu_rm_hw_iter *iter, uint32_t enc_id, enum dpu_hw_blk_type type) @@ -83,7 +97,7 @@ static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i) return false; } -bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i) +static bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i) { bool ret; @@ -635,3 +649,16 @@ end: return ret; } + +int dpu_rm_get_assigned_resources(struct dpu_rm *rm, uint32_t enc_id, + enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size) +{ + struct dpu_rm_hw_iter hw_iter; + int num_blks = 0; + + dpu_rm_init_hw_iter(&hw_iter, enc_id, type); + while (num_blks < blks_size && dpu_rm_get_hw(rm, &hw_iter)) + blks[num_blks++] = hw_iter.blk->hw; + + return num_blks; +} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h index 9c580a017094..982b91e27227 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h @@ -24,26 +24,6 @@ struct dpu_rm { struct mutex rm_lock; }; -/** - * struct dpu_rm_hw_blk - resource manager internal structure - * forward declaration for single iterator definition without void pointer - */ -struct dpu_rm_hw_blk; - -/** - * struct dpu_rm_hw_iter - iterator for use with dpu_rm - * @hw: dpu_hw object requested, or NULL on failure - * @blk: dpu_rm internal block representation. Clients ignore. Used as iterator. - * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder - * @type: Hardware Block Type client wishes to search for. - */ -struct dpu_rm_hw_iter { - void *hw; - struct dpu_rm_hw_blk *blk; - uint32_t enc_id; - enum dpu_hw_blk_type type; -}; - /** * dpu_rm_init - Read hardware catalog and create reservation tracking objects * for all HW blocks. @@ -93,28 +73,9 @@ int dpu_rm_reserve(struct dpu_rm *rm, void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc); /** - * dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list - * using dpu_rm_get_hw - * @iter: iter object to initialize - * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder - * @type: Hardware Block Type client wishes to search for. + * Get hw resources of the given type that are assigned to this encoder. */ -void dpu_rm_init_hw_iter( - struct dpu_rm_hw_iter *iter, - uint32_t enc_id, - enum dpu_hw_blk_type type); -/** - * dpu_rm_get_hw - retrieve reserved hw object given encoder and hw type - * Meant to do a single pass through the hardware list to iteratively - * retrieve hardware blocks of a given type for a given encoder. - * Initialize an iterator object. - * Set hw block type of interest. Set encoder id of interest, 0 for any. - * Function returns first hw of type for that encoder. - * Subsequent calls will return the next reserved hw of that type in-order. - * Iterator HW pointer will be null on failure to find hw. 
- * @rm: DPU Resource Manager handle - * @iter: iterator object - * @Return: true on match found, false on no match found - */ -bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter); +int dpu_rm_get_assigned_resources(struct dpu_rm *rm, uint32_t enc_id, + enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size); #endif /* __DPU_RM_H__ */ + -- cgit v1.2.3 From bb00a452d6f77391441ef7df48f7115dd459cd2f Mon Sep 17 00:00:00 2001 From: Drew Davenport Date: Wed, 19 Feb 2020 10:42:26 -0700 Subject: drm/msm/dpu: Refactor resource manager Track hardware resource objects in arrays rather than a list and remove the resource manager's iterator idiom. Separate the mapping of hardware resources to an encoder ID into a different array. Use an implicit mapping between the hardware blocks' ids, which are 1-based, and array indices in these arrays to replace iteration with index lookups in several places. Signed-off-by: Drew Davenport [squash in minor compiler warning fixes] Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 553 ++++++++++++++------------------- drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | 22 +- 2 files changed, 255 insertions(+), 320 deletions(-) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index 779df26dc81a..594e2d10b000 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -12,8 +12,12 @@ #include "dpu_encoder.h" #include "dpu_trace.h" -#define RESERVED_BY_OTHER(h, r) \ - ((h)->enc_id && (h)->enc_id != r) + +static inline bool reserved_by_other(uint32_t *res_map, int idx, + uint32_t enc_id) +{ + return res_map[idx] && res_map[idx] != enc_id; +} /** * struct dpu_rm_requirements - Reservation requirements parameter bundle @@ -25,126 +29,40 @@ struct dpu_rm_requirements { struct dpu_encoder_hw_resources hw_res; }; - -/** - * struct dpu_rm_hw_blk - hardware block tracking list member - * @list: List head for list of all hardware blocks tracking items - * @id: Hardware ID number, within it's own space, ie. LM_X - * @enc_id: Encoder id to which this blk is binded - * @hw: Pointer to the hardware register access object for this block - */ -struct dpu_rm_hw_blk { - struct list_head list; - uint32_t id; - uint32_t enc_id; - struct dpu_hw_blk *hw; -}; - -/** - * struct dpu_rm_hw_iter - iterator for use with dpu_rm - * @hw: dpu_hw object requested, or NULL on failure - * @blk: dpu_rm internal block representation. Clients ignore. Used as iterator. - * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder - * @type: Hardware Block Type client wishes to search for. 
- */ -struct dpu_rm_hw_iter { - void *hw; - struct dpu_rm_hw_blk *blk; - uint32_t enc_id; - enum dpu_hw_blk_type type; -}; - -static void dpu_rm_init_hw_iter( - struct dpu_rm_hw_iter *iter, - uint32_t enc_id, - enum dpu_hw_blk_type type) -{ - memset(iter, 0, sizeof(*iter)); - iter->enc_id = enc_id; - iter->type = type; -} - -static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i) +int dpu_rm_destroy(struct dpu_rm *rm) { - struct list_head *blk_list; + int i; - if (!rm || !i || i->type >= DPU_HW_BLK_MAX) { - DPU_ERROR("invalid rm\n"); - return false; - } + for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) { + struct dpu_hw_pingpong *hw; - i->hw = NULL; - blk_list = &rm->hw_blks[i->type]; - - if (i->blk && (&i->blk->list == blk_list)) { - DPU_DEBUG("attempt resume iteration past last\n"); - return false; + if (rm->pingpong_blks[i]) { + hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]); + dpu_hw_pingpong_destroy(hw); + } } + for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) { + struct dpu_hw_mixer *hw; - i->blk = list_prepare_entry(i->blk, blk_list, list); - - list_for_each_entry_continue(i->blk, blk_list, list) { - if (i->enc_id == i->blk->enc_id) { - i->hw = i->blk->hw; - DPU_DEBUG("found type %d id %d for enc %d\n", - i->type, i->blk->id, i->enc_id); - return true; + if (rm->mixer_blks[i]) { + hw = to_dpu_hw_mixer(rm->mixer_blks[i]); + dpu_hw_lm_destroy(hw); } } + for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) { + struct dpu_hw_ctl *hw; - DPU_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id); - - return false; -} - -static bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i) -{ - bool ret; - - mutex_lock(&rm->rm_lock); - ret = _dpu_rm_get_hw_locked(rm, i); - mutex_unlock(&rm->rm_lock); - - return ret; -} - -static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw) -{ - switch (type) { - case DPU_HW_BLK_LM: - dpu_hw_lm_destroy(hw); - break; - case DPU_HW_BLK_CTL: - dpu_hw_ctl_destroy(hw); - break; - case DPU_HW_BLK_PINGPONG: - dpu_hw_pingpong_destroy(hw); - break; - case DPU_HW_BLK_INTF: - dpu_hw_intf_destroy(hw); - break; - case DPU_HW_BLK_SSPP: - /* SSPPs are not managed by the resource manager */ - case DPU_HW_BLK_TOP: - /* Top is a singleton, not managed in hw_blks list */ - case DPU_HW_BLK_MAX: - default: - DPU_ERROR("unsupported block type %d\n", type); - break; + if (rm->ctl_blks[i]) { + hw = to_dpu_hw_ctl(rm->ctl_blks[i]); + dpu_hw_ctl_destroy(hw); + } } -} + for (i = 0; i < ARRAY_SIZE(rm->intf_blks); i++) { + struct dpu_hw_intf *hw; -int dpu_rm_destroy(struct dpu_rm *rm) -{ - struct dpu_rm_hw_blk *hw_cur, *hw_nxt; - enum dpu_hw_blk_type type; - - for (type = 0; type < DPU_HW_BLK_MAX; type++) { - list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type], - list) { - list_del(&hw_cur->list); - _dpu_rm_hw_destroy(type, hw_cur->hw); - kfree(hw_cur); + if (rm->intf_blks[i]) { + hw = to_dpu_hw_intf(rm->intf_blks[i]); + dpu_hw_intf_destroy(hw); } } @@ -153,65 +71,11 @@ int dpu_rm_destroy(struct dpu_rm *rm) return 0; } -static int _dpu_rm_hw_blk_create( - struct dpu_rm *rm, - const struct dpu_mdss_cfg *cat, - void __iomem *mmio, - enum dpu_hw_blk_type type, - uint32_t id) -{ - struct dpu_rm_hw_blk *blk; - void *hw; - - switch (type) { - case DPU_HW_BLK_LM: - hw = dpu_hw_lm_init(id, mmio, cat); - break; - case DPU_HW_BLK_CTL: - hw = dpu_hw_ctl_init(id, mmio, cat); - break; - case DPU_HW_BLK_PINGPONG: - hw = dpu_hw_pingpong_init(id, mmio, cat); - break; - case DPU_HW_BLK_INTF: - hw = dpu_hw_intf_init(id, mmio, cat); - break; - case 
DPU_HW_BLK_SSPP: - /* SSPPs are not managed by the resource manager */ - case DPU_HW_BLK_TOP: - /* Top is a singleton, not managed in hw_blks list */ - case DPU_HW_BLK_MAX: - default: - DPU_ERROR("unsupported block type %d\n", type); - return -EINVAL; - } - - if (IS_ERR_OR_NULL(hw)) { - DPU_ERROR("failed hw object creation: type %d, err %ld\n", - type, PTR_ERR(hw)); - return -EFAULT; - } - - blk = kzalloc(sizeof(*blk), GFP_KERNEL); - if (!blk) { - _dpu_rm_hw_destroy(type, hw); - return -ENOMEM; - } - - blk->id = id; - blk->hw = hw; - blk->enc_id = 0; - list_add_tail(&blk->list, &rm->hw_blks[type]); - - return 0; -} - int dpu_rm_init(struct dpu_rm *rm, struct dpu_mdss_cfg *cat, void __iomem *mmio) { int rc, i; - enum dpu_hw_blk_type type; if (!rm || !cat || !mmio) { DPU_ERROR("invalid kms\n"); @@ -223,11 +87,9 @@ int dpu_rm_init(struct dpu_rm *rm, mutex_init(&rm->rm_lock); - for (type = 0; type < DPU_HW_BLK_MAX; type++) - INIT_LIST_HEAD(&rm->hw_blks[type]); - /* Interrogate HW catalog and create tracking items for hw blocks */ for (i = 0; i < cat->mixer_count; i++) { + struct dpu_hw_mixer *hw; const struct dpu_lm_cfg *lm = &cat->mixer[i]; if (lm->pingpong == PINGPONG_MAX) { @@ -235,12 +97,17 @@ int dpu_rm_init(struct dpu_rm *rm, continue; } - rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM, - cat->mixer[i].id); - if (rc) { - DPU_ERROR("failed: lm hw not available\n"); + if (lm->id < LM_0 || lm->id >= LM_MAX) { + DPU_ERROR("skip mixer %d with invalid id\n", lm->id); + continue; + } + hw = dpu_hw_lm_init(lm->id, mmio, cat); + if (IS_ERR_OR_NULL(hw)) { + rc = PTR_ERR(hw); + DPU_ERROR("failed lm object creation: err %d\n", rc); goto fail; } + rm->mixer_blks[lm->id - LM_0] = &hw->base; if (!rm->lm_max_width) { rm->lm_max_width = lm->sblk->maxwidth; @@ -256,35 +123,59 @@ int dpu_rm_init(struct dpu_rm *rm, } for (i = 0; i < cat->pingpong_count; i++) { - rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG, - cat->pingpong[i].id); - if (rc) { - DPU_ERROR("failed: pp hw not available\n"); + struct dpu_hw_pingpong *hw; + const struct dpu_pingpong_cfg *pp = &cat->pingpong[i]; + + if (pp->id < PINGPONG_0 || pp->id >= PINGPONG_MAX) { + DPU_ERROR("skip pingpong %d with invalid id\n", pp->id); + continue; + } + hw = dpu_hw_pingpong_init(pp->id, mmio, cat); + if (IS_ERR_OR_NULL(hw)) { + rc = PTR_ERR(hw); + DPU_ERROR("failed pingpong object creation: err %d\n", + rc); goto fail; } + rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base; } for (i = 0; i < cat->intf_count; i++) { - if (cat->intf[i].type == INTF_NONE) { + struct dpu_hw_intf *hw; + const struct dpu_intf_cfg *intf = &cat->intf[i]; + + if (intf->type == INTF_NONE) { DPU_DEBUG("skip intf %d with type none\n", i); continue; } - - rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF, - cat->intf[i].id); - if (rc) { - DPU_ERROR("failed: intf hw not available\n"); + if (intf->id < INTF_0 || intf->id >= INTF_MAX) { + DPU_ERROR("skip intf %d with invalid id\n", intf->id); + continue; + } + hw = dpu_hw_intf_init(intf->id, mmio, cat); + if (IS_ERR_OR_NULL(hw)) { + rc = PTR_ERR(hw); + DPU_ERROR("failed intf object creation: err %d\n", rc); goto fail; } + rm->intf_blks[intf->id - INTF_0] = &hw->base; } for (i = 0; i < cat->ctl_count; i++) { - rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL, - cat->ctl[i].id); - if (rc) { - DPU_ERROR("failed: ctl hw not available\n"); + struct dpu_hw_ctl *hw; + const struct dpu_ctl_cfg *ctl = &cat->ctl[i]; + + if (ctl->id < CTL_0 || ctl->id >= CTL_MAX) { + DPU_ERROR("skip ctl %d with invalid 
id\n", ctl->id); + continue; + } + hw = dpu_hw_ctl_init(ctl->id, mmio, cat); + if (IS_ERR_OR_NULL(hw)) { + rc = PTR_ERR(hw); + DPU_ERROR("failed ctl object creation: err %d\n", rc); goto fail; } + rm->ctl_blks[ctl->id - CTL_0] = &hw->base; } return 0; @@ -292,7 +183,7 @@ int dpu_rm_init(struct dpu_rm *rm, fail: dpu_rm_destroy(rm); - return rc; + return rc ? rc : -EFAULT; } static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top) @@ -300,72 +191,69 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top) return top->num_intf > 1; } +/** + * _dpu_rm_check_lm_peer - check if a mixer is a peer of the primary + * @rm: dpu resource manager handle + * @primary_idx: index of primary mixer in rm->mixer_blks[] + * @peer_idx: index of other mixer in rm->mixer_blks[] + * @Return: true if rm->mixer_blks[peer_idx] is a peer of + * rm->mixer_blks[primary_idx] + */ +static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx, + int peer_idx) +{ + const struct dpu_lm_cfg *prim_lm_cfg; + const struct dpu_lm_cfg *peer_cfg; + + prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap; + peer_cfg = to_dpu_hw_mixer(rm->mixer_blks[peer_idx])->cap; + + if (!test_bit(peer_cfg->id, &prim_lm_cfg->lm_pair_mask)) { + DPU_DEBUG("lm %d not peer of lm %d\n", peer_cfg->id, + peer_cfg->id); + return false; + } + return true; +} + /** * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets * proposed use case requirements, incl. hardwired dependent blocks like * pingpong * @rm: dpu resource manager handle * @enc_id: encoder id requesting for allocation - * @lm: proposed layer mixer, function checks if lm, and all other hardwired - * blocks connected to the lm (pp) is available and appropriate - * @pp: output parameter, pingpong block attached to the layer mixer. - * NULL if pp was not available, or not matching requirements. - * @primary_lm: if non-null, this function check if lm is compatible primary_lm - * as well as satisfying all other requirements + * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks + * if lm, and all other hardwired blocks connected to the lm (pp) is + * available and appropriate + * @pp_idx: output parameter, index of pingpong block attached to the layer + * mixer in rm->pongpong_blks[]. * @Return: true if lm matches all requirements, false otherwise */ -static bool _dpu_rm_check_lm_and_get_connected_blks( - struct dpu_rm *rm, - uint32_t enc_id, - struct dpu_rm_hw_blk *lm, - struct dpu_rm_hw_blk **pp, - struct dpu_rm_hw_blk *primary_lm) +static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm, + uint32_t enc_id, int lm_idx, int *pp_idx) { - const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap; - struct dpu_rm_hw_iter iter; - - *pp = NULL; - - DPU_DEBUG("check lm %d pp %d\n", - lm_cfg->id, lm_cfg->pingpong); - - /* Check if this layer mixer is a peer of the proposed primary LM */ - if (primary_lm) { - const struct dpu_lm_cfg *prim_lm_cfg = - to_dpu_hw_mixer(primary_lm->hw)->cap; - - if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) { - DPU_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id, - prim_lm_cfg->id); - return false; - } - } + const struct dpu_lm_cfg *lm_cfg; + int idx; /* Already reserved? 
*/ - if (RESERVED_BY_OTHER(lm, enc_id)) { - DPU_DEBUG("lm %d already reserved\n", lm_cfg->id); + if (reserved_by_other(rm->mixer_to_enc_id, lm_idx, enc_id)) { + DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0); return false; } - dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG); - while (_dpu_rm_get_hw_locked(rm, &iter)) { - if (iter.blk->id == lm_cfg->pingpong) { - *pp = iter.blk; - break; - } - } - - if (!*pp) { + lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap; + idx = lm_cfg->pingpong - PINGPONG_0; + if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) { DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong); return false; } - if (RESERVED_BY_OTHER(*pp, enc_id)) { - DPU_DEBUG("lm %d pp %d already reserved\n", lm->id, - (*pp)->id); + if (reserved_by_other(rm->pingpong_to_enc_id, idx, enc_id)) { + DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id, + lm_cfg->pingpong); return false; } - + *pp_idx = idx; return true; } @@ -373,11 +261,9 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id, struct dpu_rm_requirements *reqs) { - struct dpu_rm_hw_blk *lm[MAX_BLOCKS]; - struct dpu_rm_hw_blk *pp[MAX_BLOCKS]; - struct dpu_rm_hw_iter iter_i, iter_j; - int lm_count = 0; - int i, rc = 0; + int lm_idx[MAX_BLOCKS]; + int pp_idx[MAX_BLOCKS]; + int i, j, lm_count = 0; if (!reqs->topology.num_lm) { DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm); @@ -385,36 +271,39 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id, } /* Find a primary mixer */ - dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM); - while (lm_count != reqs->topology.num_lm && - _dpu_rm_get_hw_locked(rm, &iter_i)) { - memset(&lm, 0, sizeof(lm)); - memset(&pp, 0, sizeof(pp)); + for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) && + lm_count < reqs->topology.num_lm; i++) { + if (!rm->mixer_blks[i]) + continue; lm_count = 0; - lm[lm_count] = iter_i.blk; + lm_idx[lm_count] = i; if (!_dpu_rm_check_lm_and_get_connected_blks( - rm, enc_id, lm[lm_count], - &pp[lm_count], NULL)) + rm, enc_id, i, &pp_idx[lm_count])) { continue; + } ++lm_count; /* Valid primary mixer found, find matching peers */ - dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM); + for (j = i + 1; j < ARRAY_SIZE(rm->mixer_blks) && + lm_count < reqs->topology.num_lm; j++) { + if (!rm->mixer_blks[j]) + continue; - while (lm_count != reqs->topology.num_lm && - _dpu_rm_get_hw_locked(rm, &iter_j)) { - if (iter_i.blk == iter_j.blk) + if (!_dpu_rm_check_lm_peer(rm, i, j)) { + DPU_DEBUG("lm %d not peer of lm %d\n", LM_0 + j, + LM_0 + i); continue; + } if (!_dpu_rm_check_lm_and_get_connected_blks( - rm, enc_id, iter_j.blk, - &pp[lm_count], iter_i.blk)) + rm, enc_id, j, &pp_idx[lm_count])) { continue; + } - lm[lm_count] = iter_j.blk; + lm_idx[lm_count] = j; ++lm_count; } } @@ -424,17 +313,15 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id, return -ENAVAIL; } - for (i = 0; i < ARRAY_SIZE(lm); i++) { - if (!lm[i]) - break; - - lm[i]->enc_id = enc_id; - pp[i]->enc_id = enc_id; + for (i = 0; i < lm_count; i++) { + rm->mixer_to_enc_id[lm_idx[i]] = enc_id; + rm->pingpong_to_enc_id[pp_idx[i]] = enc_id; - trace_dpu_rm_reserve_lms(lm[i]->id, enc_id, pp[i]->id); + trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id, + pp_idx[i] + PINGPONG_0); } - return rc; + return 0; } static int _dpu_rm_reserve_ctls( @@ -442,47 +329,48 @@ static int _dpu_rm_reserve_ctls( uint32_t enc_id, const struct msm_display_topology *top) { - struct dpu_rm_hw_blk *ctls[MAX_BLOCKS]; - struct dpu_rm_hw_iter iter; - int i = 0, num_ctls = 0; - bool 
needs_split_display = false; - - memset(&ctls, 0, sizeof(ctls)); + int ctl_idx[MAX_BLOCKS]; + int i = 0, j, num_ctls; + bool needs_split_display; /* each hw_intf needs its own hw_ctrl to program its control path */ num_ctls = top->num_intf; needs_split_display = _dpu_rm_needs_split_display(top); - dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL); - while (_dpu_rm_get_hw_locked(rm, &iter)) { - const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw); - unsigned long features = ctl->caps->features; + for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) { + const struct dpu_hw_ctl *ctl; + unsigned long features; bool has_split_display; - if (RESERVED_BY_OTHER(iter.blk, enc_id)) + if (!rm->ctl_blks[j]) + continue; + if (reserved_by_other(rm->ctl_to_enc_id, j, enc_id)) continue; + ctl = to_dpu_hw_ctl(rm->ctl_blks[j]); + features = ctl->caps->features; has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features; - DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features); + DPU_DEBUG("ctl %d caps 0x%lX\n", rm->ctl_blks[j]->id, features); if (needs_split_display != has_split_display) continue; - ctls[i] = iter.blk; - DPU_DEBUG("ctl %d match\n", iter.blk->id); + ctl_idx[i] = j; + DPU_DEBUG("ctl %d match\n", j + CTL_0); if (++i == num_ctls) break; + } if (i != num_ctls) return -ENAVAIL; - for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) { - ctls[i]->enc_id = enc_id; - trace_dpu_rm_reserve_ctls(ctls[i]->id, enc_id); + for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) { + rm->ctl_to_enc_id[ctl_idx[i]] = enc_id; + trace_dpu_rm_reserve_ctls(i + CTL_0, enc_id); } return 0; @@ -493,32 +381,20 @@ static int _dpu_rm_reserve_intf( uint32_t enc_id, uint32_t id) { - struct dpu_rm_hw_iter iter; - int ret = 0; - - /* Find the block entry in the rm, and note the reservation */ - dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_INTF); - while (_dpu_rm_get_hw_locked(rm, &iter)) { - if (iter.blk->id != id) - continue; - - if (RESERVED_BY_OTHER(iter.blk, enc_id)) { - DPU_ERROR("intf id %d already reserved\n", id); - return -ENAVAIL; - } + int idx = id - INTF_0; - iter.blk->enc_id = enc_id; - trace_dpu_rm_reserve_intf(iter.blk->id, enc_id); - break; - } - - /* Shouldn't happen since intfs are fixed at probe */ - if (!iter.hw) { + if (!rm->intf_blks[idx]) { DPU_ERROR("couldn't find intf id %d\n", id); return -EINVAL; } - return ret; + if (reserved_by_other(rm->intf_to_enc_id, idx, enc_id)) { + DPU_ERROR("intf id %d already reserved\n", id); + return -ENAVAIL; + } + + rm->intf_to_enc_id[idx] = enc_id; + return 0; } static int _dpu_rm_reserve_intf_related_hw( @@ -583,22 +459,29 @@ static int _dpu_rm_populate_requirements( return 0; } -static void _dpu_rm_release_reservation(struct dpu_rm *rm, uint32_t enc_id) +static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt, + uint32_t enc_id) { - struct dpu_rm_hw_blk *blk; - enum dpu_hw_blk_type type; - - for (type = 0; type < DPU_HW_BLK_MAX; type++) { - list_for_each_entry(blk, &rm->hw_blks[type], list) { - if (blk->enc_id == enc_id) { - blk->enc_id = 0; - DPU_DEBUG("rel enc %d %d %d\n", enc_id, - type, blk->id); - } - } + int i; + + for (i = 0; i < cnt; i++) { + if (res_mapping[i] == enc_id) + res_mapping[i] = 0; } } +static void _dpu_rm_release_reservation(struct dpu_rm *rm, uint32_t enc_id) +{ + _dpu_rm_clear_mapping(rm->pingpong_to_enc_id, + ARRAY_SIZE(rm->pingpong_to_enc_id), enc_id); + _dpu_rm_clear_mapping(rm->mixer_to_enc_id, + ARRAY_SIZE(rm->mixer_to_enc_id), enc_id); + _dpu_rm_clear_mapping(rm->ctl_to_enc_id, + ARRAY_SIZE(rm->ctl_to_enc_id), enc_id); + 
_dpu_rm_clear_mapping(rm->intf_to_enc_id, + ARRAY_SIZE(rm->intf_to_enc_id), enc_id); +} + void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc) { mutex_lock(&rm->rm_lock); @@ -653,12 +536,48 @@ end: int dpu_rm_get_assigned_resources(struct dpu_rm *rm, uint32_t enc_id, enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size) { - struct dpu_rm_hw_iter hw_iter; - int num_blks = 0; + struct dpu_hw_blk **hw_blks; + uint32_t *hw_to_enc_id; + int i, num_blks, max_blks; + + switch (type) { + case DPU_HW_BLK_PINGPONG: + hw_blks = rm->pingpong_blks; + hw_to_enc_id = rm->pingpong_to_enc_id; + max_blks = ARRAY_SIZE(rm->pingpong_blks); + break; + case DPU_HW_BLK_LM: + hw_blks = rm->mixer_blks; + hw_to_enc_id = rm->mixer_to_enc_id; + max_blks = ARRAY_SIZE(rm->mixer_blks); + break; + case DPU_HW_BLK_CTL: + hw_blks = rm->ctl_blks; + hw_to_enc_id = rm->ctl_to_enc_id; + max_blks = ARRAY_SIZE(rm->ctl_blks); + break; + case DPU_HW_BLK_INTF: + hw_blks = rm->intf_blks; + hw_to_enc_id = rm->intf_to_enc_id; + max_blks = ARRAY_SIZE(rm->intf_blks); + break; + default: + DPU_ERROR("blk type %d not managed by rm\n", type); + return 0; + } - dpu_rm_init_hw_iter(&hw_iter, enc_id, type); - while (num_blks < blks_size && dpu_rm_get_hw(rm, &hw_iter)) - blks[num_blks++] = hw_iter.blk->hw; + num_blks = 0; + for (i = 0; i < max_blks; i++) { + if (hw_to_enc_id[i] != enc_id) + continue; + + if (num_blks == blks_size) { + DPU_ERROR("More than %d resources assigned to enc %d\n", + blks_size, enc_id); + break; + } + blks[num_blks++] = hw_blks[i]; + } return num_blks; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h index 982b91e27227..9c8a436ba6cc 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h @@ -11,15 +11,31 @@ #include "msm_kms.h" #include "dpu_hw_top.h" + /** * struct dpu_rm - DPU dynamic hardware resource manager - * @hw_blks: array of lists of hardware resources present in the system, one - * list per type of hardware block + * @pingpong_blks: array of pingpong hardware resources + * @mixer_blks: array of layer mixer hardware resources + * @ctl_blks: array of ctl hardware resources + * @intf_blks: array of intf hardware resources + * @pingpong_to_enc_id: mapping of pingpong hardware resources to an encoder ID + * @mixer_to_enc_id: mapping of mixer hardware resources to an encoder ID + * @ctl_to_enc_id: mapping of ctl hardware resources to an encoder ID + * @intf_to_enc_id: mapping of intf hardware resources to an encoder ID * @lm_max_width: cached layer mixer maximum width * @rm_lock: resource manager mutex */ struct dpu_rm { - struct list_head hw_blks[DPU_HW_BLK_MAX]; + struct dpu_hw_blk *pingpong_blks[PINGPONG_MAX - PINGPONG_0]; + struct dpu_hw_blk *mixer_blks[LM_MAX - LM_0]; + struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0]; + struct dpu_hw_blk *intf_blks[INTF_MAX - INTF_0]; + + uint32_t pingpong_to_enc_id[PINGPONG_MAX - PINGPONG_0]; + uint32_t mixer_to_enc_id[LM_MAX - LM_0]; + uint32_t ctl_to_enc_id[CTL_MAX - CTL_0]; + uint32_t intf_to_enc_id[INTF_MAX - INTF_0]; + uint32_t lm_max_width; struct mutex rm_lock; }; -- cgit v1.2.3 From de3916c70a24e3e1bdbf6b0a77d75b069d8953d9 Mon Sep 17 00:00:00 2001 From: Drew Davenport Date: Wed, 19 Feb 2020 10:42:27 -0700 Subject: drm/msm/dpu: Track resources in global state Move mapping of resources to encoder ids from the resource manager to a new dpu_global_state struct. Store this struct in global atomic state. 
Before this patch, atomic test would be performed by modifying global state (resource manager), and backing out any changes if the test fails. By using drm atomic global state, this is not necessary as any changes to the global state will be discarded if the test fails. Signed-off-by: Drew Davenport Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 65 +++++++-------- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 84 +++++++++++++++++++ drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h | 26 ++++++ drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 121 ++++++++++++++-------------- drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | 22 ++--- 5 files changed, 207 insertions(+), 111 deletions(-) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 6cadeff456f0..5afde2e7fef0 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -164,7 +164,6 @@ enum dpu_enc_rc_states { * clks and resources after IDLE_TIMEOUT time. * @vsync_event_work: worker to handle vsync event for autorefresh * @topology: topology of the display - * @mode_set_complete: flag to indicate modeset completion * @idle_timeout: idle timeout duration in milliseconds */ struct dpu_encoder_virt { @@ -202,7 +201,6 @@ struct dpu_encoder_virt { struct delayed_work delayed_off_work; struct kthread_work vsync_event_work; struct msm_display_topology topology; - bool mode_set_complete; u32 idle_timeout; }; @@ -563,6 +561,7 @@ static int dpu_encoder_virt_atomic_check( const struct drm_display_mode *mode; struct drm_display_mode *adj_mode; struct msm_display_topology topology; + struct dpu_global_state *global_state; int i = 0; int ret = 0; @@ -579,6 +578,7 @@ static int dpu_encoder_virt_atomic_check( dpu_kms = to_dpu_kms(priv->kms); mode = &crtc_state->mode; adj_mode = &crtc_state->adjusted_mode; + global_state = dpu_kms_get_existing_global_state(dpu_kms); trace_dpu_enc_atomic_check(DRMID(drm_enc)); /* @@ -610,17 +610,15 @@ static int dpu_encoder_virt_atomic_check( topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode); - /* Reserve dynamic resources now. Indicating AtomicTest phase */ + /* Reserve dynamic resources now. */ if (!ret) { /* * Avoid reserving resources when mode set is pending. Topology * info may not be available to complete reservation. 
*/ - if (drm_atomic_crtc_needs_modeset(crtc_state) - && dpu_enc->mode_set_complete) { - ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state, - topology, true); - dpu_enc->mode_set_complete = false; + if (drm_atomic_crtc_needs_modeset(crtc_state)) { + ret = dpu_rm_reserve(&dpu_kms->rm, global_state, + drm_enc, crtc_state, topology); } } @@ -957,12 +955,13 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, struct drm_connector *conn = NULL, *conn_iter; struct drm_crtc *drm_crtc; struct dpu_crtc_state *cstate; + struct dpu_global_state *global_state; struct msm_display_topology topology; struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC]; struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC]; struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC]; int num_lm, num_ctl, num_pp; - int i, j, ret; + int i, j; if (!drm_enc) { DPU_ERROR("invalid encoder\n"); @@ -976,6 +975,12 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, dpu_kms = to_dpu_kms(priv->kms); connector_list = &dpu_kms->dev->mode_config.connector_list; + global_state = dpu_kms_get_existing_global_state(dpu_kms); + if (IS_ERR_OR_NULL(global_state)) { + DPU_ERROR("Failed to get global state"); + return; + } + trace_dpu_enc_mode_set(DRMID(drm_enc)); list_for_each_entry(conn_iter, connector_list, head) @@ -996,21 +1001,14 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode); - /* Reserve dynamic resources now. Indicating non-AtomicTest phase */ - ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_crtc->state, - topology, false); - if (ret) { - DPU_ERROR_ENC(dpu_enc, - "failed to reserve hw resources, %d\n", ret); - return; - } - - num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, drm_enc->base.id, - DPU_HW_BLK_PINGPONG, hw_pp, ARRAY_SIZE(hw_pp)); - num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, drm_enc->base.id, - DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl)); - num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, drm_enc->base.id, - DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); + /* Query resource that have been reserved in atomic check step. */ + num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp, + ARRAY_SIZE(hw_pp)); + num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl)); + num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) dpu_enc->hw_pp[i] = i < num_pp ? 
to_dpu_hw_pingpong(hw_pp[i]) @@ -1035,21 +1033,21 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, if (!dpu_enc->hw_pp[i]) { DPU_ERROR_ENC(dpu_enc, "no pp block assigned at idx: %d\n", i); - goto error; + return; } if (!hw_ctl[i]) { DPU_ERROR_ENC(dpu_enc, "no ctl block assigned at idx: %d\n", i); - goto error; + return; } phys->hw_pp = dpu_enc->hw_pp[i]; phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]); num_blk = dpu_rm_get_assigned_resources(&dpu_kms->rm, - drm_enc->base.id, DPU_HW_BLK_INTF, hw_blk, - ARRAY_SIZE(hw_blk)); + global_state, drm_enc->base.id, DPU_HW_BLK_INTF, + hw_blk, ARRAY_SIZE(hw_blk)); for (j = 0; j < num_blk; j++) { struct dpu_hw_intf *hw_intf; @@ -1061,18 +1059,13 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, if (!phys->hw_intf) { DPU_ERROR_ENC(dpu_enc, "no intf block assigned at idx: %d\n", i); - goto error; + return; } phys->connector = conn->state->connector; if (phys->ops.mode_set) phys->ops.mode_set(phys, mode, adj_mode); } - - dpu_enc->mode_set_complete = true; - -error: - dpu_rm_release(&dpu_kms->rm, drm_enc); } static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc) @@ -1169,6 +1162,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) struct dpu_encoder_virt *dpu_enc = NULL; struct msm_drm_private *priv; struct dpu_kms *dpu_kms; + struct dpu_global_state *global_state; int i = 0; if (!drm_enc) { @@ -1187,6 +1181,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) priv = drm_enc->dev->dev_private; dpu_kms = to_dpu_kms(priv->kms); + global_state = dpu_kms_get_existing_global_state(dpu_kms); trace_dpu_enc_disable(DRMID(drm_enc)); @@ -1216,7 +1211,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n"); - dpu_rm_release(&dpu_kms->rm, drm_enc); + dpu_rm_release(global_state, drm_enc); mutex_unlock(&dpu_enc->enc_lock); } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index cb08fafb1dc1..59d42ff9da64 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -228,6 +228,85 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) } #endif +/* Global/shared object state funcs */ + +/* + * This is a helper that returns the private state currently in operation. + * Note that this would return the "old_state" if called in the atomic check + * path, and the "new_state" after the atomic swap has been done. + */ +struct dpu_global_state * +dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms) +{ + return to_dpu_global_state(dpu_kms->global_state.state); +} + +/* + * This acquires the modeset lock set aside for global state, creates + * a new duplicated private object state. 
+ */ +struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s) +{ + struct msm_drm_private *priv = s->dev->dev_private; + struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); + struct drm_private_state *priv_state; + int ret; + + ret = drm_modeset_lock(&dpu_kms->global_state_lock, s->acquire_ctx); + if (ret) + return ERR_PTR(ret); + + priv_state = drm_atomic_get_private_obj_state(s, + &dpu_kms->global_state); + if (IS_ERR(priv_state)) + return ERR_CAST(priv_state); + + return to_dpu_global_state(priv_state); +} + +static struct drm_private_state * +dpu_kms_global_duplicate_state(struct drm_private_obj *obj) +{ + struct dpu_global_state *state; + + state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); + + return &state->base; +} + +static void dpu_kms_global_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + struct dpu_global_state *dpu_state = to_dpu_global_state(state); + + kfree(dpu_state); +} + +static const struct drm_private_state_funcs dpu_kms_global_state_funcs = { + .atomic_duplicate_state = dpu_kms_global_duplicate_state, + .atomic_destroy_state = dpu_kms_global_destroy_state, +}; + +static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms) +{ + struct dpu_global_state *state; + + drm_modeset_lock_init(&dpu_kms->global_state_lock); + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state, + &state->base, + &dpu_kms_global_state_funcs); + return 0; +} + static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) { return dpu_crtc_vblank(crtc, true); @@ -770,6 +849,11 @@ static int dpu_kms_hw_init(struct msm_kms *kms) dpu_kms = to_dpu_kms(kms); dev = dpu_kms->dev; + + rc = dpu_kms_global_obj_init(dpu_kms); + if (rc) + return rc; + priv = dev->dev_private; atomic_set(&dpu_kms->bandwidth_ref, 0); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h index c6169e7df19d..211f5de99a44 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h @@ -111,6 +111,13 @@ struct dpu_kms { struct dpu_core_perf perf; + /* + * Global private object state, Do not access directly, use + * dpu_kms_global_get_state() + */ + struct drm_modeset_lock global_state_lock; + struct drm_private_obj global_state; + struct dpu_rm rm; bool rm_init; @@ -139,6 +146,25 @@ struct vsync_info { #define to_dpu_kms(x) container_of(x, struct dpu_kms, base) +#define to_dpu_global_state(x) container_of(x, struct dpu_global_state, base) + +/* Global private object state for tracking resources that are shared across + * multiple kms objects (planes/crtcs/etc). 
+ */ +struct dpu_global_state { + struct drm_private_state base; + + uint32_t pingpong_to_enc_id[PINGPONG_MAX - PINGPONG_0]; + uint32_t mixer_to_enc_id[LM_MAX - LM_0]; + uint32_t ctl_to_enc_id[CTL_MAX - CTL_0]; + uint32_t intf_to_enc_id[INTF_MAX - INTF_0]; +}; + +struct dpu_global_state + *dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms); +struct dpu_global_state + *__must_check dpu_kms_get_global_state(struct drm_atomic_state *s); + /** * Debugfs functions - extra helper functions for debugfs support * diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index 594e2d10b000..9b62451b01ee 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -66,8 +66,6 @@ int dpu_rm_destroy(struct dpu_rm *rm) } } - mutex_destroy(&rm->rm_lock); - return 0; } @@ -85,8 +83,6 @@ int dpu_rm_init(struct dpu_rm *rm, /* Clear, setup lists */ memset(rm, 0, sizeof(*rm)); - mutex_init(&rm->rm_lock); - /* Interrogate HW catalog and create tracking items for hw blocks */ for (i = 0; i < cat->mixer_count; i++) { struct dpu_hw_mixer *hw; @@ -230,13 +226,14 @@ static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx, * @Return: true if lm matches all requirements, false otherwise */ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm, + struct dpu_global_state *global_state, uint32_t enc_id, int lm_idx, int *pp_idx) { const struct dpu_lm_cfg *lm_cfg; int idx; /* Already reserved? */ - if (reserved_by_other(rm->mixer_to_enc_id, lm_idx, enc_id)) { + if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) { DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0); return false; } @@ -248,7 +245,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm, return false; } - if (reserved_by_other(rm->pingpong_to_enc_id, idx, enc_id)) { + if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) { DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id, lm_cfg->pingpong); return false; @@ -257,7 +254,9 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm, return true; } -static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id, +static int _dpu_rm_reserve_lms(struct dpu_rm *rm, + struct dpu_global_state *global_state, + uint32_t enc_id, struct dpu_rm_requirements *reqs) { @@ -279,8 +278,8 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id, lm_count = 0; lm_idx[lm_count] = i; - if (!_dpu_rm_check_lm_and_get_connected_blks( - rm, enc_id, i, &pp_idx[lm_count])) { + if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state, + enc_id, i, &pp_idx[lm_count])) { continue; } @@ -298,8 +297,9 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id, continue; } - if (!_dpu_rm_check_lm_and_get_connected_blks( - rm, enc_id, j, &pp_idx[lm_count])) { + if (!_dpu_rm_check_lm_and_get_connected_blks(rm, + global_state, enc_id, j, + &pp_idx[lm_count])) { continue; } @@ -314,8 +314,8 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id, } for (i = 0; i < lm_count; i++) { - rm->mixer_to_enc_id[lm_idx[i]] = enc_id; - rm->pingpong_to_enc_id[pp_idx[i]] = enc_id; + global_state->mixer_to_enc_id[lm_idx[i]] = enc_id; + global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id; trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id, pp_idx[i] + PINGPONG_0); @@ -326,6 +326,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id, static int _dpu_rm_reserve_ctls( struct dpu_rm *rm, + struct dpu_global_state 
*global_state, uint32_t enc_id, const struct msm_display_topology *top) { @@ -345,7 +346,7 @@ static int _dpu_rm_reserve_ctls( if (!rm->ctl_blks[j]) continue; - if (reserved_by_other(rm->ctl_to_enc_id, j, enc_id)) + if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id)) continue; ctl = to_dpu_hw_ctl(rm->ctl_blks[j]); @@ -369,7 +370,7 @@ static int _dpu_rm_reserve_ctls( return -ENAVAIL; for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) { - rm->ctl_to_enc_id[ctl_idx[i]] = enc_id; + global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id; trace_dpu_rm_reserve_ctls(i + CTL_0, enc_id); } @@ -378,27 +379,34 @@ static int _dpu_rm_reserve_ctls( static int _dpu_rm_reserve_intf( struct dpu_rm *rm, + struct dpu_global_state *global_state, uint32_t enc_id, uint32_t id) { int idx = id - INTF_0; + if (idx < 0 || idx >= ARRAY_SIZE(rm->intf_blks)) { + DPU_ERROR("invalid intf id: %d", id); + return -EINVAL; + } + if (!rm->intf_blks[idx]) { DPU_ERROR("couldn't find intf id %d\n", id); return -EINVAL; } - if (reserved_by_other(rm->intf_to_enc_id, idx, enc_id)) { + if (reserved_by_other(global_state->intf_to_enc_id, idx, enc_id)) { DPU_ERROR("intf id %d already reserved\n", id); return -ENAVAIL; } - rm->intf_to_enc_id[idx] = enc_id; + global_state->intf_to_enc_id[idx] = enc_id; return 0; } static int _dpu_rm_reserve_intf_related_hw( struct dpu_rm *rm, + struct dpu_global_state *global_state, uint32_t enc_id, struct dpu_encoder_hw_resources *hw_res) { @@ -409,7 +417,7 @@ static int _dpu_rm_reserve_intf_related_hw( if (hw_res->intfs[i] == INTF_MODE_NONE) continue; id = i + INTF_0; - ret = _dpu_rm_reserve_intf(rm, enc_id, id); + ret = _dpu_rm_reserve_intf(rm, global_state, enc_id, id); if (ret) return ret; } @@ -419,24 +427,27 @@ static int _dpu_rm_reserve_intf_related_hw( static int _dpu_rm_make_reservation( struct dpu_rm *rm, + struct dpu_global_state *global_state, struct drm_encoder *enc, struct dpu_rm_requirements *reqs) { int ret; - ret = _dpu_rm_reserve_lms(rm, enc->base.id, reqs); + ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs); if (ret) { DPU_ERROR("unable to find appropriate mixers\n"); return ret; } - ret = _dpu_rm_reserve_ctls(rm, enc->base.id, &reqs->topology); + ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id, + &reqs->topology); if (ret) { DPU_ERROR("unable to find appropriate CTL\n"); return ret; } - ret = _dpu_rm_reserve_intf_related_hw(rm, enc->base.id, &reqs->hw_res); + ret = _dpu_rm_reserve_intf_related_hw(rm, global_state, enc->base.id, + &reqs->hw_res); if (ret) return ret; @@ -470,33 +481,25 @@ static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt, } } -static void _dpu_rm_release_reservation(struct dpu_rm *rm, uint32_t enc_id) -{ - _dpu_rm_clear_mapping(rm->pingpong_to_enc_id, - ARRAY_SIZE(rm->pingpong_to_enc_id), enc_id); - _dpu_rm_clear_mapping(rm->mixer_to_enc_id, - ARRAY_SIZE(rm->mixer_to_enc_id), enc_id); - _dpu_rm_clear_mapping(rm->ctl_to_enc_id, - ARRAY_SIZE(rm->ctl_to_enc_id), enc_id); - _dpu_rm_clear_mapping(rm->intf_to_enc_id, - ARRAY_SIZE(rm->intf_to_enc_id), enc_id); -} - -void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc) +void dpu_rm_release(struct dpu_global_state *global_state, + struct drm_encoder *enc) { - mutex_lock(&rm->rm_lock); - - _dpu_rm_release_reservation(rm, enc->base.id); - - mutex_unlock(&rm->rm_lock); + _dpu_rm_clear_mapping(global_state->pingpong_to_enc_id, + ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id); + _dpu_rm_clear_mapping(global_state->mixer_to_enc_id, + 
ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id); + _dpu_rm_clear_mapping(global_state->ctl_to_enc_id, + ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id); + _dpu_rm_clear_mapping(global_state->intf_to_enc_id, + ARRAY_SIZE(global_state->intf_to_enc_id), enc->base.id); } int dpu_rm_reserve( struct dpu_rm *rm, + struct dpu_global_state *global_state, struct drm_encoder *enc, struct drm_crtc_state *crtc_state, - struct msm_display_topology topology, - bool test_only) + struct msm_display_topology topology) { struct dpu_rm_requirements reqs; int ret; @@ -505,35 +508,31 @@ int dpu_rm_reserve( if (!drm_atomic_crtc_needs_modeset(crtc_state)) return 0; - DRM_DEBUG_KMS("reserving hw for enc %d crtc %d test_only %d\n", - enc->base.id, crtc_state->crtc->base.id, test_only); + if (IS_ERR(global_state)) { + DPU_ERROR("failed to global state\n"); + return PTR_ERR(global_state); + } - mutex_lock(&rm->rm_lock); + DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n", + enc->base.id, crtc_state->crtc->base.id); ret = _dpu_rm_populate_requirements(enc, &reqs, topology); if (ret) { DPU_ERROR("failed to populate hw requirements\n"); - goto end; + return ret; } - ret = _dpu_rm_make_reservation(rm, enc, &reqs); - if (ret) { + ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs); + if (ret) DPU_ERROR("failed to reserve hw resources: %d\n", ret); - _dpu_rm_release_reservation(rm, enc->base.id); - } else if (test_only) { - /* test_only: test the reservation and then undo */ - DPU_DEBUG("test_only: discard test [enc: %d]\n", - enc->base.id); - _dpu_rm_release_reservation(rm, enc->base.id); - } -end: - mutex_unlock(&rm->rm_lock); + return ret; } -int dpu_rm_get_assigned_resources(struct dpu_rm *rm, uint32_t enc_id, +int dpu_rm_get_assigned_resources(struct dpu_rm *rm, + struct dpu_global_state *global_state, uint32_t enc_id, enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size) { struct dpu_hw_blk **hw_blks; @@ -543,22 +542,22 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm, uint32_t enc_id, switch (type) { case DPU_HW_BLK_PINGPONG: hw_blks = rm->pingpong_blks; - hw_to_enc_id = rm->pingpong_to_enc_id; + hw_to_enc_id = global_state->pingpong_to_enc_id; max_blks = ARRAY_SIZE(rm->pingpong_blks); break; case DPU_HW_BLK_LM: hw_blks = rm->mixer_blks; - hw_to_enc_id = rm->mixer_to_enc_id; + hw_to_enc_id = global_state->mixer_to_enc_id; max_blks = ARRAY_SIZE(rm->mixer_blks); break; case DPU_HW_BLK_CTL: hw_blks = rm->ctl_blks; - hw_to_enc_id = rm->ctl_to_enc_id; + hw_to_enc_id = global_state->ctl_to_enc_id; max_blks = ARRAY_SIZE(rm->ctl_blks); break; case DPU_HW_BLK_INTF: hw_blks = rm->intf_blks; - hw_to_enc_id = rm->intf_to_enc_id; + hw_to_enc_id = global_state->intf_to_enc_id; max_blks = ARRAY_SIZE(rm->intf_blks); break; default: diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h index 9c8a436ba6cc..6d2b04f306f0 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h @@ -11,6 +11,7 @@ #include "msm_kms.h" #include "dpu_hw_top.h" +struct dpu_global_state; /** * struct dpu_rm - DPU dynamic hardware resource manager @@ -18,10 +19,6 @@ * @mixer_blks: array of layer mixer hardware resources * @ctl_blks: array of ctl hardware resources * @intf_blks: array of intf hardware resources - * @pingpong_to_enc_id: mapping of pingpong hardware resources to an encoder ID - * @mixer_to_enc_id: mapping of mixer hardware resources to an encoder ID - * @ctl_to_enc_id: mapping of ctl hardware resources to an encoder ID - * 
@intf_to_enc_id: mapping of intf hardware resources to an encoder ID * @lm_max_width: cached layer mixer maximum width * @rm_lock: resource manager mutex */ @@ -31,13 +28,7 @@ struct dpu_rm { struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0]; struct dpu_hw_blk *intf_blks[INTF_MAX - INTF_0]; - uint32_t pingpong_to_enc_id[PINGPONG_MAX - PINGPONG_0]; - uint32_t mixer_to_enc_id[LM_MAX - LM_0]; - uint32_t ctl_to_enc_id[CTL_MAX - CTL_0]; - uint32_t intf_to_enc_id[INTF_MAX - INTF_0]; - uint32_t lm_max_width; - struct mutex rm_lock; }; /** @@ -70,14 +61,13 @@ int dpu_rm_destroy(struct dpu_rm *rm); * @drm_enc: DRM Encoder handle * @crtc_state: Proposed Atomic DRM CRTC State handle * @topology: Pointer to topology info for the display - * @test_only: Atomic-Test phase, discard results (unless property overrides) * @Return: 0 on Success otherwise -ERROR */ int dpu_rm_reserve(struct dpu_rm *rm, + struct dpu_global_state *global_state, struct drm_encoder *drm_enc, struct drm_crtc_state *crtc_state, - struct msm_display_topology topology, - bool test_only); + struct msm_display_topology topology); /** * dpu_rm_reserve - Given the encoder for the display chain, release any @@ -86,12 +76,14 @@ int dpu_rm_reserve(struct dpu_rm *rm, * @enc: DRM Encoder handle * @Return: 0 on Success otherwise -ERROR */ -void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc); +void dpu_rm_release(struct dpu_global_state *global_state, + struct drm_encoder *enc); /** * Get hw resources of the given type that are assigned to this encoder. */ -int dpu_rm_get_assigned_resources(struct dpu_rm *rm, uint32_t enc_id, +int dpu_rm_get_assigned_resources(struct dpu_rm *rm, + struct dpu_global_state *global_state, uint32_t enc_id, enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size); #endif /* __DPU_RM_H__ */ -- cgit v1.2.3 From c479017faa3a9f6c60ea18ffe44e20b33487e10e Mon Sep 17 00:00:00 2001 From: Ilia Mirkin Date: Wed, 11 Mar 2020 23:51:54 -0400 Subject: drm/msm: avoid double-attaching hdmi/edp bridges Each of hdmi and edp are already attached in msm_*_bridge_init. A second attachment returns -EBUSY, failing the driver load. Tested with HDMI on IFC6410 (APQ8064 / MDP4), but eDP case should be analogous. 
Fixes: 3ef2f119bd3ed (drm/msm: Use drm_attach_bridge() to attach a bridge to an encoder) Cc: Boris Brezillon Signed-off-by: Ilia Mirkin Reviewed-by: Rob Clark Reviewed-by: Boris Brezillon Tested-by: Bjorn Andersson (hdmi part) Reviewed-by: Bjorn Andersson Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/edp/edp.c | 4 ---- drivers/gpu/drm/msm/hdmi/hdmi.c | 4 ---- 2 files changed, 8 deletions(-) diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c index ad4e963ccd9b..106a67473af5 100644 --- a/drivers/gpu/drm/msm/edp/edp.c +++ b/drivers/gpu/drm/msm/edp/edp.c @@ -178,10 +178,6 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev, goto fail; } - ret = drm_bridge_attach(encoder, edp->bridge, NULL); - if (ret) - goto fail; - priv->bridges[priv->num_bridges++] = edp->bridge; priv->connectors[priv->num_connectors++] = edp->connector; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 1a9b6289637d..737453b6e596 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -327,10 +327,6 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, goto fail; } - ret = drm_bridge_attach(encoder, hdmi->bridge, NULL); - if (ret) - goto fail; - priv->bridges[priv->num_bridges++] = hdmi->bridge; priv->connectors[priv->num_connectors++] = hdmi->connector; -- cgit v1.2.3 From 66be340f827554cb1c8a1ed7dea97920b4085af2 Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Mon, 9 Mar 2020 11:14:10 +0100 Subject: drm/msm: fix leaks if initialization fails We should free resources in unlikely case of allocation failure. Signed-off-by: Pavel Machek Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/msm_drv.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index e4b750b0c2d3..7d985f8865be 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -444,8 +444,10 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) if (!dev->dma_parms) { dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL); - if (!dev->dma_parms) - return -ENOMEM; + if (!dev->dma_parms) { + ret = -ENOMEM; + goto err_msm_uninit; + } } dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); -- cgit v1.2.3 From 4c145df18f71782007276b670d060d39393fd393 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 5 Mar 2020 04:54:51 -0600 Subject: drm/msm/msm_gem.h: Replace zero-length array with flexible-array member The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] This issue was found with the help of Coccinelle. 
[1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. Silva Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/msm_gem.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 9e0953c2b7ce..37aa556c5f92 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -157,7 +157,7 @@ struct msm_gem_submit { uint32_t handle; }; uint64_t iova; - } bos[0]; + } bos[]; }; #endif /* __MSM_GEM_H__ */ -- cgit v1.2.3 From b83caf42532b25d59cf78c135c6433581b3cd77c Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 11 Mar 2020 08:34:10 +0100 Subject: drm/msm: Use scnprintf() for avoiding potential buffer overflow Since snprintf() returns the would-be-output size instead of the actual output size, the succeeding calls may go beyond the given buffer limit. Fix it by replacing with scnprintf(). Signed-off-by: Takashi Iwai Reviewed-by: Jordan Crouse Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 7fd29829b2fa..1d5c43c22269 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -673,7 +673,7 @@ static char *adreno_gpu_ascii85_encode(u32 *src, size_t len) return NULL; for (i = 0; i < l; i++) - buf_itr += snprintf(buf + buf_itr, buffer_size - buf_itr, "%s", + buf_itr += scnprintf(buf + buf_itr, buffer_size - buf_itr, "%s", ascii85_encode(src[i], out)); return buf; -- cgit v1.2.3 From acc978d7dcd82bc0addd4330555d7ba5fb994bfc Mon Sep 17 00:00:00 2001 From: tongtiangen Date: Thu, 12 Mar 2020 08:25:59 +0800 Subject: drm/msm/dpu: Remove some set but not used variables Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c: In function _dpu_debugfs_show_regset32: drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c:142:26: warning: variable priv set but not used [-Wunused-but-set-variable] drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c: In function dpu_kms_prepare_commit: drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c:271:21: warning: variable dev set but not used [-Wunused-but-set-variable] drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c: In function _dpu_kms_hw_destroy: drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c:555:21: warning: variable dev set but not used [-Wunused-but-set-variable] drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c: In function dpu_kms_hw_init: drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c:763:26: warning: variable priv set but not used [-Wunused-but-set-variable] drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c: In function dpu_runtime_suspend: drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c:1021:21: warning: variable ddev set but not used [-Wunused-but-set-variable] Reported-by: Hulk Robot Signed-off-by: tongtiangen Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 59d42ff9da64..ce19f1d39367 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -138,16 +138,12 @@ static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data) { struct dpu_debugfs_regset32 *regset = s->private; struct dpu_kms *dpu_kms = regset->dpu_kms; - struct drm_device *dev; - struct 
msm_drm_private *priv; void __iomem *base; uint32_t i, addr; if (!dpu_kms->mmio) return 0; - dev = dpu_kms->dev; - priv = dev->dev_private; base = dpu_kms->mmio + regset->offset; /* insert padding spaces, if needed */ @@ -346,8 +342,6 @@ static ktime_t dpu_kms_vsync_time(struct msm_kms *kms, struct drm_crtc *crtc) static void dpu_kms_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) { - struct dpu_kms *dpu_kms; - struct drm_device *dev; struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; struct drm_encoder *encoder; @@ -355,8 +349,6 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms, if (!kms) return; - dpu_kms = to_dpu_kms(kms); - dev = dpu_kms->dev; /* Call prepare_commit for all affected encoders */ for_each_new_crtc_in_state(state, crtc, crtc_state, i) { @@ -631,11 +623,8 @@ static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate, static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms) { - struct drm_device *dev; int i; - dev = dpu_kms->dev; - if (dpu_kms->hw_intr) dpu_hw_intr_destroy(dpu_kms->hw_intr); dpu_kms->hw_intr = NULL; @@ -839,7 +828,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms) { struct dpu_kms *dpu_kms; struct drm_device *dev; - struct msm_drm_private *priv; int i, rc = -EINVAL; if (!kms) { @@ -854,8 +842,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms) if (rc) return rc; - priv = dev->dev_private; - atomic_set(&dpu_kms->bandwidth_ref, 0); dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp"); @@ -1102,10 +1088,8 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev) int rc = -1; struct platform_device *pdev = to_platform_device(dev); struct dpu_kms *dpu_kms = platform_get_drvdata(pdev); - struct drm_device *ddev; struct dss_module_power *mp = &dpu_kms->mp; - ddev = dpu_kms->dev; rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false); if (rc) DPU_ERROR("clock disable failed rc:%d\n", rc); -- cgit v1.2.3 From 327903242acda90fa01577049c7a05188447cfcc Mon Sep 17 00:00:00 2001 From: Zheng Bin Date: Thu, 23 Jan 2020 11:40:40 +0800 Subject: drm/msm/dpu: fix comparing pointer to 0 in dpu_encoder_phys_cmd.c Fixes coccicheck warning: drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c:414:52-53: WARNING comparing pointer to 0 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c:443:56-57: WARNING comparing pointer to 0 Reported-by: Hulk Robot Signed-off-by: Zheng Bin Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c index 39e1e280ba44..8493d68ad841 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c @@ -411,7 +411,7 @@ static void _dpu_encoder_phys_cmd_pingpong_config( to_dpu_encoder_phys_cmd(phys_enc); if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) { - DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0); + DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != NULL); return; } @@ -440,7 +440,7 @@ static void dpu_encoder_phys_cmd_enable_helper( u32 flush_mask = 0; if (!phys_enc->hw_pp) { - DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0); + DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL); return; } -- cgit v1.2.3 From 30801221a73781ecefc6b72ce16a9775df23a27b Mon Sep 17 00:00:00 2001 From: Zheng Bin Date: Thu, 23 Jan 2020 11:40:41 +0800 Subject: drm/msm/dpu: fix comparing pointer to 0 in 
dpu_encoder_phys_vid.c Fixes coccicheck warning: drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c:242:48-49: WARNING comparing pointer to 0 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c:562:25-26: WARNING comparing pointer to 0 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c:562:48-49: WARNING comparing pointer to 0 Reported-by: Hulk Robot Signed-off-by: Zheng Bin Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index c71c18de5966..b5a49050d131 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -239,7 +239,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine( struct dpu_hw_intf_cfg intf_cfg = { 0 }; if (!phys_enc->hw_ctl->ops.setup_intf_cfg) { - DPU_ERROR("invalid encoder %d\n", phys_enc != 0); + DPU_ERROR("invalid encoder %d\n", phys_enc != NULL); return; } @@ -559,7 +559,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc) if (!phys_enc->hw_intf) { DPU_ERROR("invalid hw_intf %d hw_ctl %d\n", - phys_enc->hw_intf != 0, phys_enc->hw_ctl != 0); + phys_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL); return; } -- cgit v1.2.3 From a41aa44be1146c8ce6206c8c46f545125b665fe7 Mon Sep 17 00:00:00 2001 From: Zheng Bin Date: Thu, 23 Jan 2020 11:40:42 +0800 Subject: drm/msm/dpu: fix comparing pointer to 0 in dpu_vbif.c Fixes coccicheck warning: drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c:27:51-52: WARNING comparing pointer to 0 drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c:109:51-52: WARNING comparing pointer to 0 drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c:167:12-13: WARNING comparing pointer to 0 drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c:167:22-23: WARNING comparing pointer to 0 Reported-by: Hulk Robot Signed-off-by: Zheng Bin Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c index 93ab36bd8df3..5e8c3f3e6625 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c @@ -24,7 +24,7 @@ static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id) int rc; if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) { - DPU_ERROR("invalid arguments vbif %d\n", vbif != 0); + DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL); return -EINVAL; } @@ -106,7 +106,7 @@ static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif, u32 val; if (!vbif || !vbif->cap) { - DPU_ERROR("invalid arguments vbif %d\n", vbif != 0); + DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL); return -EINVAL; } @@ -164,7 +164,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms, if (!vbif || !mdp) { DPU_DEBUG("invalid arguments vbif %d mdp %d\n", - vbif != 0, mdp != 0); + vbif != NULL, mdp != NULL); return; } -- cgit v1.2.3 From e6790f7210062ea551c378250ea5b70851d35e95 Mon Sep 17 00:00:00 2001 From: Zheng Bin Date: Thu, 23 Jan 2020 11:40:43 +0800 Subject: drm/msm/dpu: fix comparing pointer to 0 in dpu_encoder.c Fixes coccicheck warning: drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c:464:56-57: WARNING comparing pointer to 0 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c:571:15-16: WARNING comparing pointer to 0 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c:571:32-33: WARNING comparing pointer 
to 0 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c:571:49-50: WARNING comparing pointer to 0 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c:1968:17-18: WARNING comparing pointer to 0 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c:1981:17-18: WARNING comparing pointer to 0 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c:2012:51-52: WARNING comparing pointer to 0 Reported-by: Hulk Robot Signed-off-by: Zheng Bin Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 5afde2e7fef0..42bf5c82ada4 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -459,7 +459,7 @@ void dpu_encoder_helper_split_config( struct msm_display_info *disp_info; if (!phys_enc->hw_mdptop || !phys_enc->parent) { - DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0); + DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL); return; } @@ -567,7 +567,7 @@ static int dpu_encoder_virt_atomic_check( if (!drm_enc || !crtc_state || !conn_state) { DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n", - drm_enc != 0, crtc_state != 0, conn_state != 0); + drm_enc != NULL, crtc_state != NULL, conn_state != NULL); return -EINVAL; } @@ -1947,7 +1947,7 @@ static int dpu_encoder_virt_add_phys_encs( if (IS_ERR_OR_NULL(enc)) { DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n", PTR_ERR(enc)); - return enc == 0 ? -EINVAL : PTR_ERR(enc); + return enc == NULL ? -EINVAL : PTR_ERR(enc); } dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; @@ -1960,7 +1960,7 @@ static int dpu_encoder_virt_add_phys_encs( if (IS_ERR_OR_NULL(enc)) { DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n", PTR_ERR(enc)); - return enc == 0 ? -EINVAL : PTR_ERR(enc); + return enc == NULL ? -EINVAL : PTR_ERR(enc); } dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; @@ -1991,7 +1991,7 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, struct dpu_enc_phys_init_params phys_params; if (!dpu_enc) { - DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != 0); + DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL); return -EINVAL; } -- cgit v1.2.3 From 0478b4fc5f37f4d494245fe7bcce3f531cf380e9 Mon Sep 17 00:00:00 2001 From: Jordan Crouse Date: Fri, 14 Feb 2020 11:36:44 -0700 Subject: drm/msm/a5xx: Always set an OPP supported hardware value If the opp table specifies opp-supported-hw as a property but the driver has not set a supported hardware value the OPP subsystem will reject all the table entries. Set a "default" value that will match the default table entries but not conflict with any possible real bin values. Also fix a small memory leak and free the buffer allocated by nvmem_cell_read(). 
Signed-off-by: Jordan Crouse Reviewed-by: Eric Anholt Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 7d9e63e20ded..724024a2243a 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -1446,18 +1446,31 @@ static const struct adreno_gpu_funcs funcs = { static void check_speed_bin(struct device *dev) { struct nvmem_cell *cell; - u32 bin, val; + u32 val; + + /* + * If the OPP table specifies a opp-supported-hw property then we have + * to set something with dev_pm_opp_set_supported_hw() or the table + * doesn't get populated so pick an arbitrary value that should + * ensure the default frequencies are selected but not conflict with any + * actual bins + */ + val = 0x80; cell = nvmem_cell_get(dev, "speed_bin"); - /* If a nvmem cell isn't defined, nothing to do */ - if (IS_ERR(cell)) - return; + if (!IS_ERR(cell)) { + void *buf = nvmem_cell_read(cell, NULL); + + if (!IS_ERR(buf)) { + u8 bin = *((u8 *) buf); - bin = *((u32 *) nvmem_cell_read(cell, NULL)); - nvmem_cell_put(cell); + val = (1 << bin); + kfree(buf); + } - val = (1 << bin); + nvmem_cell_put(cell); + } dev_pm_opp_set_supported_hw(dev, &val, 1); } -- cgit v1.2.3 From e515af8d4a6f18f96c360724568a7497868101a8 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Tue, 18 Feb 2020 13:20:12 -0800 Subject: drm/msm: devcoredump should dump MSM_SUBMIT_BO_DUMP buffers Also log buffers with the DUMP flag set, to ensure we capture all useful cmdstream in crashdump state with modern mesa. Otherwise we miss out on the contents of "state object" cmdstream buffers. v2: add missing 'inline' Signed-off-by: Rob Clark Reviewed-by: Jordan Crouse --- drivers/gpu/drm/msm/msm_gem.h | 10 ++++++++++ drivers/gpu/drm/msm/msm_gpu.c | 28 +++++++++++++++++++++++----- drivers/gpu/drm/msm/msm_rd.c | 8 +------- 3 files changed, 34 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 37aa556c5f92..30584eaf8cc8 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -160,4 +160,14 @@ struct msm_gem_submit { } bos[]; }; +/* helper to determine of a buffer in submit should be dumped, used for both + * devcoredump and debugfs cmdstream dumping: + */ +static inline bool +should_dump(struct msm_gem_submit *submit, int idx) +{ + extern bool rd_full; + return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP); +} + #endif /* __MSM_GEM_H__ */ diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 18f3a5c53ffb..615c5cda5389 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -355,16 +355,34 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, state->cmd = kstrdup(cmd, GFP_KERNEL); if (submit) { - int i; - - state->bos = kcalloc(submit->nr_cmds, + int i, nr = 0; + + /* count # of buffers to dump: */ + for (i = 0; i < submit->nr_bos; i++) + if (should_dump(submit, i)) + nr++; + /* always dump cmd bo's, but don't double count them: */ + for (i = 0; i < submit->nr_cmds; i++) + if (!should_dump(submit, submit->cmd[i].idx)) + nr++; + + state->bos = kcalloc(nr, sizeof(struct msm_gpu_state_bo), GFP_KERNEL); + for (i = 0; i < submit->nr_bos; i++) { + if (should_dump(submit, i)) { + msm_gpu_crashstate_get_bo(state, submit->bos[i].obj, + submit->bos[i].iova, submit->bos[i].flags); + } + } + for (i = 0; 
state->bos && i < submit->nr_cmds; i++) { int idx = submit->cmd[i].idx; - msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj, - submit->bos[idx].iova, submit->bos[idx].flags); + if (!should_dump(submit, submit->cmd[i].idx)) { + msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj, + submit->bos[idx].iova, submit->bos[idx].flags); + } } } diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index af7ceb246c7c..732f65df5c4f 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -43,7 +43,7 @@ #include "msm_gpu.h" #include "msm_gem.h" -static bool rd_full = false; +bool rd_full = false; MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents"); module_param_named(rd_full, rd_full, bool, 0600); @@ -336,12 +336,6 @@ static void snapshot_buf(struct msm_rd_state *rd, msm_gem_put_vaddr(&obj->base); } -static bool -should_dump(struct msm_gem_submit *submit, int idx) -{ - return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP); -} - /* called under struct_mutex */ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, const char *fmt, ...) -- cgit v1.2.3 From e6cada895a37af1a1bebbeadff8600942d1029cb Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Thu, 20 Feb 2020 10:00:09 -0800 Subject: drm/msm/a6xx: Fix CP_MEMPOOL state name Signed-off-by: Rob Clark Reviewed-by: Jordan Crouse --- drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h index e67c20c415af..24c974c293e5 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h @@ -379,7 +379,7 @@ static const struct a6xx_indexed_registers { }; static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = { - "CP_MEMPOOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR, + "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR, REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060, }; -- cgit v1.2.3 From 00d9220ec5ed3513905cfde94bb4337633a061e3 Mon Sep 17 00:00:00 2001 From: Brian Masney Date: Mon, 9 Mar 2020 07:18:04 -0400 Subject: dt-bindings: display: msm: gmu: move sram property to gpu bindings The sram property was incorrectly added to the GMU binding when it really belongs with the GPU binding instead. Let's go ahead and move it. While changes are being made here, let's update the sram property description to mention that this property is only valid for a3xx and a4xx GPUs. The a3xx/a4xx example in the GPU is replaced with what was in the GMU. Signed-off-by: Brian Masney Fixes: 198a72c8f9ee ("dt-bindings: display: msm: gmu: add optional ocmem property") Acked-by: Jordan Crouse Signed-off-by: Rob Clark --- .../devicetree/bindings/display/msm/gmu.txt | 51 -------------------- .../devicetree/bindings/display/msm/gpu.txt | 55 +++++++++++++++++----- 2 files changed, 42 insertions(+), 64 deletions(-) diff --git a/Documentation/devicetree/bindings/display/msm/gmu.txt b/Documentation/devicetree/bindings/display/msm/gmu.txt index bf9c7a2a495c..90af5b0a56a9 100644 --- a/Documentation/devicetree/bindings/display/msm/gmu.txt +++ b/Documentation/devicetree/bindings/display/msm/gmu.txt @@ -31,10 +31,6 @@ Required properties: - iommus: phandle to the adreno iommu - operating-points-v2: phandle to the OPP operating points -Optional properties: -- sram: phandle to the On Chip Memory (OCMEM) that's present on some Snapdragon - SoCs. See Documentation/devicetree/bindings/sram/qcom,ocmem.yaml. 
- Example: / { @@ -67,50 +63,3 @@ Example: operating-points-v2 = <&gmu_opp_table>; }; }; - -a3xx example with OCMEM support: - -/ { - ... - - gpu: adreno@fdb00000 { - compatible = "qcom,adreno-330.2", - "qcom,adreno"; - reg = <0xfdb00000 0x10000>; - reg-names = "kgsl_3d0_reg_memory"; - interrupts = ; - interrupt-names = "kgsl_3d0_irq"; - clock-names = "core", - "iface", - "mem_iface"; - clocks = <&mmcc OXILI_GFX3D_CLK>, - <&mmcc OXILICX_AHB_CLK>, - <&mmcc OXILICX_AXI_CLK>; - sram = <&gmu_sram>; - power-domains = <&mmcc OXILICX_GDSC>; - operating-points-v2 = <&gpu_opp_table>; - iommus = <&gpu_iommu 0>; - }; - - ocmem@fdd00000 { - compatible = "qcom,msm8974-ocmem"; - - reg = <0xfdd00000 0x2000>, - <0xfec00000 0x180000>; - reg-names = "ctrl", - "mem"; - - clocks = <&rpmcc RPM_SMD_OCMEMGX_CLK>, - <&mmcc OCMEMCX_OCMEMNOC_CLK>; - clock-names = "core", - "iface"; - - #address-cells = <1>; - #size-cells = <1>; - - gmu_sram: gmu-sram@0 { - reg = <0x0 0x100000>; - ranges = <0 0 0xfec00000 0x100000>; - }; - }; -}; diff --git a/Documentation/devicetree/bindings/display/msm/gpu.txt b/Documentation/devicetree/bindings/display/msm/gpu.txt index 7edc298a15f2..fd779cd6994d 100644 --- a/Documentation/devicetree/bindings/display/msm/gpu.txt +++ b/Documentation/devicetree/bindings/display/msm/gpu.txt @@ -35,25 +35,54 @@ Required properties: bring the GPU out of secure mode. - firmware-name: optional property of the 'zap-shader' node, listing the relative path of the device specific zap firmware. +- sram: phandle to the On Chip Memory (OCMEM) that's present on some a3xx and + a4xx Snapdragon SoCs. See + Documentation/devicetree/bindings/sram/qcom,ocmem.yaml. -Example 3xx/4xx/a5xx: +Example 3xx/4xx: / { ... - gpu: qcom,kgsl-3d0@4300000 { - compatible = "qcom,adreno-320.2", "qcom,adreno"; - reg = <0x04300000 0x20000>; + gpu: adreno@fdb00000 { + compatible = "qcom,adreno-330.2", + "qcom,adreno"; + reg = <0xfdb00000 0x10000>; reg-names = "kgsl_3d0_reg_memory"; - interrupts = ; - clock-names = - "core", - "iface", - "mem_iface"; - clocks = - <&mmcc GFX3D_CLK>, - <&mmcc GFX3D_AHB_CLK>, - <&mmcc MMSS_IMEM_AHB_CLK>; + interrupts = ; + interrupt-names = "kgsl_3d0_irq"; + clock-names = "core", + "iface", + "mem_iface"; + clocks = <&mmcc OXILI_GFX3D_CLK>, + <&mmcc OXILICX_AHB_CLK>, + <&mmcc OXILICX_AXI_CLK>; + sram = <&gpu_sram>; + power-domains = <&mmcc OXILICX_GDSC>; + operating-points-v2 = <&gpu_opp_table>; + iommus = <&gpu_iommu 0>; + }; + + gpu_sram: ocmem@fdd00000 { + compatible = "qcom,msm8974-ocmem"; + + reg = <0xfdd00000 0x2000>, + <0xfec00000 0x180000>; + reg-names = "ctrl", + "mem"; + + clocks = <&rpmcc RPM_SMD_OCMEMGX_CLK>, + <&mmcc OCMEMCX_OCMEMNOC_CLK>; + clock-names = "core", + "iface"; + + #address-cells = <1>; + #size-cells = <1>; + + gpu_sram: gpu-sram@0 { + reg = <0x0 0x100000>; + ranges = <0 0 0xfec00000 0x100000>; + }; }; }; -- cgit v1.2.3 From a168b512de1ae82da888d5e376bfdb92b61ea8e6 Mon Sep 17 00:00:00 2001 From: Jordan Crouse Date: Thu, 19 Mar 2020 21:36:10 -0600 Subject: dt-bindings: display: msm: Convert GMU bindings to YAML Convert display/msm/gmu.txt to display/msm/gmu.yaml and remove the old text bindings. The 'sram' text from the old binding never applied to the GMU so it was not converted but all the other properties were correct. 
Acked-by: Sam Ravnborg Reviewed-by: Rob Herring Signed-off-by: Jordan Crouse Signed-off-by: Rob Clark --- .../devicetree/bindings/display/msm/gmu.txt | 65 ----------- .../devicetree/bindings/display/msm/gmu.yaml | 123 +++++++++++++++++++++ 2 files changed, 123 insertions(+), 65 deletions(-) delete mode 100644 Documentation/devicetree/bindings/display/msm/gmu.txt create mode 100644 Documentation/devicetree/bindings/display/msm/gmu.yaml diff --git a/Documentation/devicetree/bindings/display/msm/gmu.txt b/Documentation/devicetree/bindings/display/msm/gmu.txt deleted file mode 100644 index 90af5b0a56a9..000000000000 --- a/Documentation/devicetree/bindings/display/msm/gmu.txt +++ /dev/null @@ -1,65 +0,0 @@ -Qualcomm adreno/snapdragon GMU (Graphics management unit) - -The GMU is a programmable power controller for the GPU. the CPU controls the -GMU which in turn handles power controls for the GPU. - -Required properties: -- compatible: "qcom,adreno-gmu-XYZ.W", "qcom,adreno-gmu" - for example: "qcom,adreno-gmu-630.2", "qcom,adreno-gmu" - Note that you need to list the less specific "qcom,adreno-gmu" - for generic matches and the more specific identifier to identify - the specific device. -- reg: Physical base address and length of the GMU registers. -- reg-names: Matching names for the register regions - * "gmu" - * "gmu_pdc" - * "gmu_pdc_seg" -- interrupts: The interrupt signals from the GMU. -- interrupt-names: Matching names for the interrupts - * "hfi" - * "gmu" -- clocks: phandles to the device clocks -- clock-names: Matching names for the clocks - * "gmu" - * "cxo" - * "axi" - * "mnoc" -- power-domains: should be: - <&clock_gpucc GPU_CX_GDSC> - <&clock_gpucc GPU_GX_GDSC> -- power-domain-names: Matching names for the power domains -- iommus: phandle to the adreno iommu -- operating-points-v2: phandle to the OPP operating points - -Example: - -/ { - ... - - gmu: gmu@506a000 { - compatible="qcom,adreno-gmu-630.2", "qcom,adreno-gmu"; - - reg = <0x506a000 0x30000>, - <0xb280000 0x10000>, - <0xb480000 0x10000>; - reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq"; - - interrupts = , - ; - interrupt-names = "hfi", "gmu"; - - clocks = <&gpucc GPU_CC_CX_GMU_CLK>, - <&gpucc GPU_CC_CXO_CLK>, - <&gcc GCC_DDRSS_GPU_AXI_CLK>, - <&gcc GCC_GPU_MEMNOC_GFX_CLK>; - clock-names = "gmu", "cxo", "axi", "memnoc"; - - power-domains = <&gpucc GPU_CX_GDSC>, - <&gpucc GPU_GX_GDSC>; - power-domain-names = "cx", "gx"; - - iommus = <&adreno_smmu 5>; - - operating-points-v2 = <&gmu_opp_table>; - }; -}; diff --git a/Documentation/devicetree/bindings/display/msm/gmu.yaml b/Documentation/devicetree/bindings/display/msm/gmu.yaml new file mode 100644 index 000000000000..0b8736a9384e --- /dev/null +++ b/Documentation/devicetree/bindings/display/msm/gmu.yaml @@ -0,0 +1,123 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright 2019-2020, The Linux Foundation, All Rights Reserved +%YAML 1.2 +--- + +$id: "http://devicetree.org/schemas/display/msm/gmu.yaml#" +$schema: "http://devicetree.org/meta-schemas/core.yaml#" + +title: Devicetree bindings for the GMU attached to certain Adreno GPUs + +maintainers: + - Rob Clark + +description: | + These bindings describe the Graphics Management Unit (GMU) that is attached + to members of the Adreno A6xx GPU family. The GMU provides on-device power + management and support to improve power efficiency and reduce the load on + the CPU. 
+ +properties: + compatible: + items: + - enum: + - qcom,adreno-gmu-630.2 + - const: qcom,adreno-gmu + + reg: + items: + - description: Core GMU registers + - description: GMU PDC registers + - description: GMU PDC sequence registers + + reg-names: + items: + - const: gmu + - const: gmu_pdc + - const: gmu_pdc_seq + + clocks: + items: + - description: GMU clock + - description: GPU CX clock + - description: GPU AXI clock + - description: GPU MEMNOC clock + + clock-names: + items: + - const: gmu + - const: cxo + - const: axi + - const: memnoc + + interrupts: + items: + - description: GMU HFI interrupt + - description: GMU interrupt + + + interrupt-names: + items: + - const: hfi + - const: gmu + + power-domains: + items: + - description: CX power domain + - description: GX power domain + + power-domain-names: + items: + - const: cx + - const: gx + + iommus: + maxItems: 1 + + operating-points-v2: true + +required: + - compatible + - reg + - reg-names + - clocks + - clock-names + - interrupts + - interrupt-names + - power-domains + - power-domain-names + - iommus + - operating-points-v2 + +examples: + - | + #include + #include + #include + #include + + gmu: gmu@506a000 { + compatible="qcom,adreno-gmu-630.2", "qcom,adreno-gmu"; + + reg = <0x506a000 0x30000>, + <0xb280000 0x10000>, + <0xb480000 0x10000>; + reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq"; + + clocks = <&gpucc GPU_CC_CX_GMU_CLK>, + <&gpucc GPU_CC_CXO_CLK>, + <&gcc GCC_DDRSS_GPU_AXI_CLK>, + <&gcc GCC_GPU_MEMNOC_GFX_CLK>; + clock-names = "gmu", "cxo", "axi", "memnoc"; + + interrupts = , + ; + interrupt-names = "hfi", "gmu"; + + power-domains = <&gpucc GPU_CX_GDSC>, + <&gpucc GPU_GX_GDSC>; + power-domain-names = "cx", "gx"; + + iommus = <&adreno_smmu 5>; + operating-points-v2 = <&gmu_opp_table>; + }; -- cgit v1.2.3 From a5fb8b918920c6f7706a8b5b8ea535a7f077a7f6 Mon Sep 17 00:00:00 2001 From: Jordan Crouse Date: Thu, 19 Mar 2020 21:36:11 -0600 Subject: drm/msm/a6xx: Use the DMA API for GMU memory objects The GMU has very few memory allocations and uses a flat memory space so there is no good reason to go out of our way to bypass the DMA APIs which were basically designed for this exact scenario. v7: Check return value of dma_set_mask_and_coherent v4: Use dma_alloc_wc() v3: Set the dma mask correctly and use dma_addr_t for the iova type v2: Pass force_dma false to of_dma_configure to require that the DMA region be set up and return error from of_dma_configure to fail probe. Reviewed-by: Michael J. Ruhl Signed-off-by: Jordan Crouse Signed-off-by: Rob Clark --- drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 115 ++++------------------------------ drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 6 +- 2 files changed, 14 insertions(+), 107 deletions(-) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 748cd379065f..c4e71abbdd53 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -2,6 +2,7 @@ /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. 
*/ #include +#include #include #include #include @@ -920,21 +921,10 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo) { - int count, i; - u64 iova; - if (IS_ERR_OR_NULL(bo)) return; - count = bo->size >> PAGE_SHIFT; - iova = bo->iova; - - for (i = 0; i < count; i++, iova += PAGE_SIZE) { - iommu_unmap(gmu->domain, iova, PAGE_SIZE); - __free_pages(bo->pages[i], 0); - } - - kfree(bo->pages); + dma_free_wc(gmu->dev, bo->size, bo->virt, bo->iova); kfree(bo); } @@ -942,7 +932,6 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, size_t size) { struct a6xx_gmu_bo *bo; - int ret, count, i; bo = kzalloc(sizeof(*bo), GFP_KERNEL); if (!bo) @@ -950,86 +939,14 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, bo->size = PAGE_ALIGN(size); - count = bo->size >> PAGE_SHIFT; + bo->virt = dma_alloc_wc(gmu->dev, bo->size, &bo->iova, GFP_KERNEL); - bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL); - if (!bo->pages) { + if (!bo->virt) { kfree(bo); return ERR_PTR(-ENOMEM); } - for (i = 0; i < count; i++) { - bo->pages[i] = alloc_page(GFP_KERNEL); - if (!bo->pages[i]) - goto err; - } - - bo->iova = gmu->uncached_iova_base; - - for (i = 0; i < count; i++) { - ret = iommu_map(gmu->domain, - bo->iova + (PAGE_SIZE * i), - page_to_phys(bo->pages[i]), PAGE_SIZE, - IOMMU_READ | IOMMU_WRITE); - - if (ret) { - DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n"); - - for (i = i - 1 ; i >= 0; i--) - iommu_unmap(gmu->domain, - bo->iova + (PAGE_SIZE * i), - PAGE_SIZE); - - goto err; - } - } - - bo->virt = vmap(bo->pages, count, VM_IOREMAP, - pgprot_writecombine(PAGE_KERNEL)); - if (!bo->virt) - goto err; - - /* Align future IOVA addresses on 1MB boundaries */ - gmu->uncached_iova_base += ALIGN(size, SZ_1M); - return bo; - -err: - for (i = 0; i < count; i++) { - if (bo->pages[i]) - __free_pages(bo->pages[i], 0); - } - - kfree(bo->pages); - kfree(bo); - - return ERR_PTR(-ENOMEM); -} - -static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu) -{ - int ret; - - /* - * The GMU address space is hardcoded to treat the range - * 0x60000000 - 0x80000000 as un-cached memory. 
All buffers shared - * between the GMU and the CPU will live in this space - */ - gmu->uncached_iova_base = 0x60000000; - - - gmu->domain = iommu_domain_alloc(&platform_bus_type); - if (!gmu->domain) - return -ENODEV; - - ret = iommu_attach_device(gmu->domain, gmu->dev); - - if (ret) { - iommu_domain_free(gmu->domain); - gmu->domain = NULL; - } - - return ret; } /* Return the 'arc-level' for the given frequency */ @@ -1289,10 +1206,6 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu) a6xx_gmu_memory_free(gmu, gmu->hfi); - iommu_detach_device(gmu->domain, gmu->dev); - - iommu_domain_free(gmu->domain); - free_irq(gmu->gmu_irq, gmu); free_irq(gmu->hfi_irq, gmu); @@ -1313,7 +1226,15 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) gmu->dev = &pdev->dev; - of_dma_configure(gmu->dev, node, true); + /* Pass force_dma false to require the DT to set the dma region */ + ret = of_dma_configure(gmu->dev, node, false); + if (ret) + return ret; + + /* Set the mask after the of_dma_configure() */ + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(31)); + if (ret) + return ret; /* Fow now, don't do anything fancy until we get our feet under us */ gmu->idle_level = GMU_IDLE_STATE_ACTIVE; @@ -1325,11 +1246,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) if (ret) goto err_put_device; - /* Set up the IOMMU context bank */ - ret = a6xx_gmu_memory_probe(gmu); - if (ret) - goto err_put_device; - /* Allocate memory for for the HFI queues */ gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K); if (IS_ERR(gmu->hfi)) @@ -1375,11 +1291,6 @@ err_mmio: err_memory: a6xx_gmu_memory_free(gmu, gmu->hfi); - if (gmu->domain) { - iommu_detach_device(gmu->domain, gmu->dev); - - iommu_domain_free(gmu->domain); - } ret = -ENODEV; err_put_device: diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h index 2af91ed7ed0c..4af65a36d5ca 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h @@ -12,8 +12,7 @@ struct a6xx_gmu_bo { void *virt; size_t size; - u64 iova; - struct page **pages; + dma_addr_t iova; }; /* @@ -49,9 +48,6 @@ struct a6xx_gmu { int hfi_irq; int gmu_irq; - struct iommu_domain *domain; - u64 uncached_iova_base; - struct device *gxpd; int idle_level; -- cgit v1.2.3