path: root/drivers/gpu/drm/amd/display/dc
Diffstat (limited to 'drivers/gpu/drm/amd/display/dc')
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 92
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 124
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 505
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 557
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c | 57
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c | 31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_link.h | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_audio.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 57
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 42
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 42
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/Makefile | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c | 349
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c | 237
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h | 34
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c | 174
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_cp_psp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/Makefile | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c | 390
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h | 42
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h (renamed from drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c | 291
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h | 94
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/Makefile | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c | 259
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h | 50
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h | 2
64 files changed, 2630 insertions, 1361 deletions
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index cdb5c027411a..a4bef4364afd 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -99,6 +99,10 @@ static enum bp_result get_firmware_info_v3_2(
struct bios_parser *bp,
struct dc_firmware_info *info);
+static enum bp_result get_firmware_info_v3_4(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info);
+
static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp,
struct atom_display_object_path_v2 *object);
@@ -1426,8 +1430,10 @@ static enum bp_result bios_parser_get_firmware_info(
break;
case 2:
case 3:
- case 4:
result = get_firmware_info_v3_2(bp, info);
+ break;
+ case 4:
+ result = get_firmware_info_v3_4(bp, info);
break;
default:
break;
@@ -1575,6 +1581,88 @@ static enum bp_result get_firmware_info_v3_2(
return BP_RESULT_OK;
}
+static enum bp_result get_firmware_info_v3_4(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info)
+{
+ struct atom_firmware_info_v3_4 *firmware_info;
+ struct atom_common_table_header *header;
+ struct atom_data_revision revision;
+ struct atom_display_controller_info_v4_1 *dce_info_v4_1 = NULL;
+ struct atom_display_controller_info_v4_4 *dce_info_v4_4 = NULL;
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ firmware_info = GET_IMAGE(struct atom_firmware_info_v3_4,
+ DATA_TABLES(firmwareinfo));
+
+ if (!firmware_info)
+ return BP_RESULT_BADBIOSTABLE;
+
+ memset(info, 0, sizeof(*info));
+
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(dce_info));
+
+ get_atom_data_table_revision(header, &revision);
+
+ switch (revision.major) {
+ case 4:
+ switch (revision.minor) {
+ case 4:
+ dce_info_v4_4 = GET_IMAGE(struct atom_display_controller_info_v4_4,
+ DATA_TABLES(dce_info));
+
+ if (!dce_info_v4_4)
+ return BP_RESULT_BADBIOSTABLE;
+
+ /* 100MHz expected */
+ info->pll_info.crystal_frequency = dce_info_v4_4->dce_refclk_10khz * 10;
+ info->dp_phy_ref_clk = dce_info_v4_4->dpphy_refclk_10khz * 10;
+ /* 50MHz expected */
+ info->i2c_engine_ref_clk = dce_info_v4_4->i2c_engine_refclk_10khz * 10;
+
+ /* Get SMU Display PLL VCO Frequency in KHz*/
+ info->smu_gpu_pll_output_freq = dce_info_v4_4->dispclk_pll_vco_freq * 10;
+ break;
+
+ default:
+ /* should not come here, keep as backup, as was before */
+ dce_info_v4_1 = GET_IMAGE(struct atom_display_controller_info_v4_1,
+ DATA_TABLES(dce_info));
+
+ if (!dce_info_v4_1)
+ return BP_RESULT_BADBIOSTABLE;
+
+ info->pll_info.crystal_frequency = dce_info_v4_1->dce_refclk_10khz * 10;
+ info->dp_phy_ref_clk = dce_info_v4_1->dpphy_refclk_10khz * 10;
+ info->i2c_engine_ref_clk = dce_info_v4_1->i2c_engine_refclk_10khz * 10;
+ break;
+ }
+ break;
+
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(smu_info));
+ get_atom_data_table_revision(header, &revision);
+
+ // We need to convert from 10KHz units into KHz units.
+ info->default_memory_clk = firmware_info->bootup_mclk_in10khz * 10;
+
+ if (firmware_info->board_i2c_feature_id == 0x2) {
+ info->oem_i2c_present = true;
+ info->oem_i2c_obj_id = firmware_info->board_i2c_feature_gpio_id;
+ } else {
+ info->oem_i2c_present = false;
+ }
+
+ return BP_RESULT_OK;
+}
+
static enum bp_result bios_parser_get_encoder_cap_info(
struct dc_bios *dcb,
struct graphics_object_id object_id,
@@ -2233,6 +2321,8 @@ static enum bp_result get_integrated_info_v2_2(
info->ext_disp_conn_info.checksum =
info_v2_2->extdispconninfo.checksum;
+ info->ext_disp_conn_info.fixdpvoltageswing =
+ info_v2_2->extdispconninfo.fixdpvoltageswing;
info->edp1_info.edp_backlight_pwm_hz =
le16_to_cpu(info_v2_2->edp1_info.edp_backlight_pwm_hz);
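Note: the new get_firmware_info_v3_4() path above pulls its reference clocks from the DCE info table and converts the BIOS's 10 kHz units into kHz (hence the "100MHz expected" / "50MHz expected" comments). A minimal standalone sketch of that conversion, with made-up table values rather than real BIOS data:

#include <stdint.h>
#include <stdio.h>

/* BIOS firmware/DCE info tables report clocks in 10 kHz units. */
static uint32_t bios_10khz_to_khz(uint32_t val_in_10khz)
{
        return val_in_10khz * 10;
}

int main(void)
{
        uint32_t dce_refclk_10khz = 10000;        /* hypothetical: 100 MHz crystal */
        uint32_t i2c_engine_refclk_10khz = 5000;  /* hypothetical: 50 MHz I2C ref */

        printf("crystal frequency: %u kHz\n", bios_10khz_to_khz(dce_refclk_10khz));
        printf("i2c engine refclk: %u kHz\n", bios_10khz_to_khz(i2c_engine_refclk_10khz));
        return 0;
}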
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 1548b2a3fe03..26f96ee32472 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -100,11 +100,13 @@ void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_m
if (edp_num) {
for (panel_inst = 0; panel_inst < edp_num; panel_inst++) {
+ bool allow_active = false;
+
edp_link = edp_links[panel_inst];
if (!edp_link->psr_settings.psr_feature_enabled)
continue;
clk_mgr->psr_allow_active_cache = edp_link->psr_settings.psr_allow_active;
- dc_link_set_psr_allow_active(edp_link, false, false, false);
+ dc_link_set_psr_allow_active(edp_link, &allow_active, false, false, NULL);
}
}
@@ -124,7 +126,7 @@ void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
if (!edp_link->psr_settings.psr_feature_enabled)
continue;
dc_link_set_psr_allow_active(edp_link,
- clk_mgr->psr_allow_active_cache, false, false);
+ &clk_mgr->psr_allow_active_cache, false, false, NULL);
}
}
@@ -283,13 +285,8 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
BREAK_TO_DEBUGGER();
return NULL;
}
- if (ASICREV_IS_YELLOW_CARP(asic_id.hw_internal_rev)) {
- /* TODO: to add DCN31 clk_mgr support, once CLK IP header files are available,
- * for now use DCN3.0 clk mgr.
- */
- dcn31_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
- return &clk_mgr->base.base;
- }
+
+ dcn31_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base.base;
}
#endif
@@ -326,7 +323,6 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
break;
case FAMILY_YELLOW_CARP:
- if (ASICREV_IS_YELLOW_CARP(clk_mgr_base->ctx->asic_id.hw_internal_rev))
dcn31_clk_mgr_destroy(clk_mgr);
break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index d7bf9283dc90..f4c9a458ace8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -219,14 +219,17 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
update_dispclk = true;
}
- /* TODO: add back DTO programming when DPPCLK restore is fixed in FSDL*/
if (dpp_clock_lowered) {
// increase per DPP DTO before lowering global dppclk
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
dcn31_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
} else {
// increase global DPPCLK before lowering per DPP DTO
if (update_dppclk || update_dispclk)
dcn31_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
+ // always update dtos unless clock is lowered and not safe to lower
+ if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
// notify DMCUB of latest clocks
@@ -368,32 +371,32 @@ static struct wm_table lpddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 5.32,
- .sr_enter_plus_exit_time_us = 6.38,
+ .sr_exit_time_us = 11.5,
+ .sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 9.82,
- .sr_enter_plus_exit_time_us = 11.196,
+ .sr_exit_time_us = 11.5,
+ .sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 9.89,
- .sr_enter_plus_exit_time_us = 11.24,
+ .sr_exit_time_us = 11.5,
+ .sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 9.748,
- .sr_enter_plus_exit_time_us = 11.102,
+ .sr_exit_time_us = 11.5,
+ .sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
}
@@ -520,14 +523,21 @@ static unsigned int find_clk_for_voltage(
unsigned int voltage)
{
int i;
+ int max_voltage = 0;
+ int clock = 0;
for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
- if (clock_table->SocVoltage[i] == voltage)
+ if (clock_table->SocVoltage[i] == voltage) {
return clocks[i];
+ } else if (clock_table->SocVoltage[i] >= max_voltage &&
+ clock_table->SocVoltage[i] < voltage) {
+ max_voltage = clock_table->SocVoltage[i];
+ clock = clocks[i];
+ }
}
- ASSERT(0);
- return 0;
+ ASSERT(clock);
+ return clock;
}
void dcn31_clk_mgr_helper_populate_bw_params(
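Note: the find_clk_for_voltage() change above relaxes the old exact-match requirement. If no table entry matches the requested voltage exactly, it now returns the clock at the highest table voltage below the request instead of asserting. A simplified standalone sketch of that selection rule (hypothetical types and table, not the SMU clock table):

#include <stddef.h>
#include <stdint.h>

#define NUM_LEVELS 4

static uint32_t clk_for_voltage(const uint32_t soc_voltage_mv[NUM_LEVELS],
                                const uint32_t clocks_khz[NUM_LEVELS],
                                uint32_t voltage_mv)
{
        uint32_t best_voltage = 0;
        uint32_t best_clock = 0;
        size_t i;

        for (i = 0; i < NUM_LEVELS; i++) {
                if (soc_voltage_mv[i] == voltage_mv)
                        return clocks_khz[i];              /* exact match wins */
                if (soc_voltage_mv[i] >= best_voltage &&
                    soc_voltage_mv[i] < voltage_mv) {
                        best_voltage = soc_voltage_mv[i];  /* closest lower voltage so far */
                        best_clock = clocks_khz[i];
                }
        }
        return best_clock;  /* 0 (and an ASSERT in the driver) if nothing fits */
}

With a voltage table of {700, 800, 900, 1000} and a request for 950, this picks the 900-level clock where the old code would have hit ASSERT(0).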
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index da942e9f5142..0ded4decee05 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -71,8 +71,6 @@
#include "dmub/dmub_srv.h"
-#include "dcn30/dcn30_vpg.h"
-
#include "i2caux_interface.h"
#include "dce/dmub_hw_lock_mgr.h"
@@ -1087,6 +1085,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
struct dc_stream_state *old_stream =
dc->current_state->res_ctx.pipe_ctx[i].stream;
bool should_disable = true;
+ bool pipe_split_change =
+ context->res_ctx.pipe_ctx[i].top_pipe != dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
for (j = 0; j < context->stream_count; j++) {
if (old_stream == context->streams[j]) {
@@ -1094,6 +1094,9 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
break;
}
}
+ if (!should_disable && pipe_split_change)
+ should_disable = true;
+
if (should_disable && old_stream) {
dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
@@ -1889,6 +1892,7 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
return false;
}
+#ifdef CONFIG_DRM_AMD_DC_DCN
/* Perform updates here which need to be deferred until next vupdate
*
* i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered
@@ -1898,15 +1902,16 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
*/
static void process_deferred_updates(struct dc *dc)
{
-#ifdef CONFIG_DRM_AMD_DC_DCN
- int i;
+ int i = 0;
- if (dc->debug.enable_mem_low_power.bits.cm)
+ if (dc->debug.enable_mem_low_power.bits.cm) {
+ ASSERT(dc->dcn_ip->max_num_dpp);
for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
-#endif
+ }
}
+#endif /* CONFIG_DRM_AMD_DC_DCN */
void dc_post_update_surfaces_to_stream(struct dc *dc)
{
@@ -1933,7 +1938,9 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
}
+#ifdef CONFIG_DRM_AMD_DC_DCN
process_deferred_updates(dc);
+#endif
dc->hwss.optimize_bandwidth(dc, context);
@@ -2285,6 +2292,9 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
update_flags->bits.gamma_change = 1;
}
+ if (u->lut3d_func || u->func_shaper)
+ update_flags->bits.lut_3d = 1;
+
if (u->hdr_mult.value)
if (u->hdr_mult.value != u->surface->hdr_mult.value) {
update_flags->bits.hdr_mult = 1;
@@ -2298,6 +2308,7 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
if (update_flags->bits.input_csc_change
|| update_flags->bits.coeff_reduction_change
+ || update_flags->bits.lut_3d
|| update_flags->bits.gamma_change
|| update_flags->bits.gamut_remap_change) {
type = UPDATE_TYPE_FULL;
@@ -2356,6 +2367,11 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->dsc_config)
su_flags->bits.dsc_changed = 1;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (stream_update->mst_bw_update)
+ su_flags->bits.mst_bw = 1;
+#endif
+
if (su_flags->raw != 0)
overall_type = UPDATE_TYPE_FULL;
@@ -2674,9 +2690,6 @@ static void commit_planes_do_stream_update(struct dc *dc,
enum surface_update_type update_type,
struct dc_state *context)
{
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- struct vpg *vpg;
-#endif
int j;
// Stream updates
@@ -2697,11 +2710,6 @@ static void commit_planes_do_stream_update(struct dc *dc,
stream_update->vrr_infopacket ||
stream_update->vsc_infopacket ||
stream_update->vsp_infopacket) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- vpg = pipe_ctx->stream_res.stream_enc->vpg;
- if (vpg && vpg->funcs->vpg_poweron)
- vpg->funcs->vpg_poweron(vpg);
-#endif
resource_build_info_frame(pipe_ctx);
dc->hwss.update_info_frame(pipe_ctx);
}
@@ -2741,6 +2749,15 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (stream_update->dsc_config)
dp_update_dsc_config(pipe_ctx);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (stream_update->mst_bw_update) {
+ if (stream_update->mst_bw_update->is_increase)
+ dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
+ else
+ dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
+ }
+#endif
+
if (stream_update->pending_test_pattern) {
dc_link_dp_set_test_pattern(stream->link,
stream->test_pattern.type,
@@ -3118,8 +3135,13 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
new_pipe->plane_state->force_full_update = true;
}
- } else if (update_type == UPDATE_TYPE_FAST) {
- /* Previous frame finished and HW is ready for optimization. */
+ } else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
+ /*
+ * Previous frame finished and HW is ready for optimization.
+ *
+ * Only relevant for DCN behavior where we can guarantee the optimization
+ * is safe to apply - retain the legacy behavior for DCE.
+ */
dc_post_update_surfaces_to_stream(dc);
}
@@ -3178,6 +3200,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
}
}
+ /* Legacy optimization path for DCE. */
+ if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
+ dc_post_update_surfaces_to_stream(dc);
+ TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
+ }
+
return;
}
@@ -3478,6 +3506,7 @@ void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_
bool dc_set_psr_allow_active(struct dc *dc, bool enable)
{
int i;
+ bool allow_active;
for (i = 0; i < dc->current_state->stream_count ; i++) {
struct dc_link *link;
@@ -3489,10 +3518,12 @@ bool dc_set_psr_allow_active(struct dc *dc, bool enable)
if (link->psr_settings.psr_feature_enabled) {
if (enable && !link->psr_settings.psr_allow_active) {
- if (!dc_link_set_psr_allow_active(link, true, false, false))
+ allow_active = true;
+ if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
return false;
} else if (!enable && link->psr_settings.psr_allow_active) {
- if (!dc_link_set_psr_allow_active(link, false, true, false))
+ allow_active = false;
+ if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
return false;
}
}
@@ -3579,7 +3610,8 @@ bool dc_enable_dmub_notifications(struct dc *dc)
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* YELLOW_CARP B0 USB4 DPIA needs dmub notifications for interrupts */
if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
- dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0)
+ dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
+ !dc->debug.dpia_debug.bits.disable_dpia)
return true;
#endif
/* dmub aux needs dmub notifications to be enabled */
@@ -3727,6 +3759,60 @@ bool dc_process_dmub_set_config_async(struct dc *dc,
}
/**
+ *****************************************************************************
+ * Function: dc_process_dmub_set_mst_slots
+ *
+ * @brief
+ * Submits mst slot allocation command to dmub via inbox message
+ *
+ * @param
+ * [in] dc: dc structure
+ * [in] link_index: link index
+ * [in] mst_alloc_slots: mst slots to be allotted
+ * [out] mst_slots_in_use: mst slots in use returned in failure case
+ *
+ * @return
+ * DC_OK if successful, DC_ERROR if failure
+ *****************************************************************************
+ */
+enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
+ uint32_t link_index,
+ uint8_t mst_alloc_slots,
+ uint8_t *mst_slots_in_use)
+{
+ union dmub_rb_cmd cmd = {0};
+ struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
+
+ /* prepare MST_ALLOC_SLOTS command */
+ cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
+ cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;
+
+ cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
+ cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
+
+ if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd))
+ /* command is not processed by dmub */
+ return DC_ERROR_UNEXPECTED;
+
+ /* command processed by dmub, if ret_status is 1 */
+ if (cmd.set_config_access.header.ret_status != 1)
+ /* command processing error */
+ return DC_ERROR_UNEXPECTED;
+
+ /* command processed and we have a status of 2, mst not enabled in dpia */
+ if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
+ return DC_FAIL_UNSUPPORTED_1;
+
+ /* previously configured mst alloc and used slots did not match */
+ if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
+ *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
+ return DC_NOT_SUPPORTED;
+ }
+
+ return DC_OK;
+}
+
+/**
* dc_disable_accelerated_mode - disable accelerated mode
* @dc: dc structure
*/
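Note: the return codes documented for dc_process_dmub_set_mst_slots() above (DC_OK, DC_ERROR_UNEXPECTED for a DMUB failure, DC_FAIL_UNSUPPORTED_1 when MST is not enabled in the DPIA, DC_NOT_SUPPORTED when previously allocated slots are still in use) suggest a caller pattern like the following. This is an illustrative sketch only, assuming the usual dc headers; the wrapper name and its handling are hypothetical:

static enum dc_status request_dpia_mst_slots(const struct dc *dc,
                                             uint32_t link_index,
                                             uint8_t wanted_slots)
{
        uint8_t slots_in_use = 0xFF;
        enum dc_status status;

        status = dc_process_dmub_set_mst_slots(dc, link_index, wanted_slots,
                                               &slots_in_use);
        if (status == DC_FAIL_UNSUPPORTED_1) {
                /* MST not enabled in the DPIA; caller falls back to SST handling */
                return status;
        }
        if (status == DC_NOT_SUPPORTED) {
                /* previous allocation still in use; slots_in_use reports how many */
                DC_LOG_MST("dpia: %d slots still in use\n", slots_in_use);
                return status;
        }
        return status;  /* DC_OK or DC_ERROR_UNEXPECTED */
}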
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index ca5dc3c168ec..60544788e911 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -674,13 +674,13 @@ static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
static void read_current_link_settings_on_detect(struct dc_link *link)
{
- union lane_count_set lane_count_set = { {0} };
+ union lane_count_set lane_count_set = {0};
uint8_t link_bw_set;
uint8_t link_rate_set;
uint32_t read_dpcd_retry_cnt = 10;
enum dc_status status = DC_ERROR_UNEXPECTED;
int i;
- union max_down_spread max_down_spread = { {0} };
+ union max_down_spread max_down_spread = {0};
// Read DPCD 00101h to find out the number of lanes currently set
for (i = 0; i < read_dpcd_retry_cnt; i++) {
@@ -1660,6 +1660,14 @@ static bool dc_link_construct_legacy(struct dc_link *link,
DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw);
DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps);
}
+
+ if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) {
+ link->bios_forced_drive_settings.VOLTAGE_SWING =
+ (info->ext_disp_conn_info.fixdpvoltageswing & 0x3);
+ link->bios_forced_drive_settings.PRE_EMPHASIS =
+ ((info->ext_disp_conn_info.fixdpvoltageswing >> 2) & 0x3);
+ }
+
break;
}
}
@@ -1757,6 +1765,9 @@ static bool dc_link_construct_dpia(struct dc_link *link,
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+ /* Some docks seem to NAK I2C writes to segment pointer with mot=0. */
+ link->wa_flags.dp_mot_reset_segment = true;
+
return true;
ddc_create_fail:
@@ -1869,8 +1880,13 @@ static enum dc_status enable_link_dp(struct dc_state *state,
do_fallback = true;
#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /*
+ * Temporary w/a to get DP2.0 link rates to work with SST.
+ * TODO DP2.0 - Workaround: Remove w/a if and when the issue is resolved.
+ */
if (dp_get_link_encoding_format(&link_settings) == DP_128b_132b_ENCODING &&
- pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ link->dc->debug.set_mst_en_for_sst) {
dp_enable_mst_on_sink(link, true);
}
#endif
@@ -1983,51 +1999,6 @@ static enum dc_status enable_link_dp_mst(
return enable_link_dp(state, pipe_ctx);
}
-void blank_all_dp_displays(struct dc *dc, bool hw_init)
-{
- unsigned int i, j, fe;
- uint8_t dpcd_power_state = '\0';
- enum dc_status status = DC_ERROR_UNEXPECTED;
-
- for (i = 0; i < dc->link_count; i++) {
- enum signal_type signal = dc->links[i]->connector_signal;
-
- if ((signal == SIGNAL_TYPE_EDP) ||
- (signal == SIGNAL_TYPE_DISPLAY_PORT)) {
- if (hw_init && signal != SIGNAL_TYPE_EDP && dc->links[i]->priv != NULL) {
- /* DP 2.0 spec requires that we read LTTPR caps first */
- dp_retrieve_lttpr_cap(dc->links[i]);
- /* if any of the displays are lit up turn them off */
- status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
- &dpcd_power_state, sizeof(dpcd_power_state));
- }
-
- if ((signal != SIGNAL_TYPE_EDP && status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) ||
- (!hw_init && dc->links[i]->link_enc &&
- dc->links[i]->link_enc->funcs->is_dig_enabled(dc->links[i]->link_enc))) {
- if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
- fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc);
- if (fe == ENGINE_ID_UNKNOWN)
- continue;
-
- for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
- if (fe == dc->res_pool->stream_enc[j]->id) {
- dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
- dc->res_pool->stream_enc[j]);
- break;
- }
- }
- }
-
- if (!dc->links[i]->wa_flags.dp_keep_receiver_powered ||
- (hw_init && signal != SIGNAL_TYPE_EDP && dc->links[i]->priv != NULL))
- dp_receiver_power_ctrl(dc->links[i], false);
- }
- }
- }
-
-}
-
static bool get_ext_hdmi_settings(struct pipe_ctx *pipe_ctx,
enum engine_id eng_id,
struct ext_hdmi_settings *settings)
@@ -2956,8 +2927,8 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
return true;
}
-bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active,
- bool wait, bool force_static)
+bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active,
+ bool wait, bool force_static, const unsigned int *power_opts)
{
struct dc *dc = link->ctx->dc;
struct dmcu *dmcu = dc->res_pool->dmcu;
@@ -2970,20 +2941,33 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active,
if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
return false;
- link->psr_settings.psr_allow_active = allow_active;
+ /* Set power optimization flag */
+ if (power_opts && link->psr_settings.psr_power_opt != *power_opts) {
+ link->psr_settings.psr_power_opt = *power_opts;
+
+ if (psr != NULL && link->psr_settings.psr_feature_enabled && psr->funcs->psr_set_power_opt)
+ psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt);
+ }
+
+ /* Enable or Disable PSR */
+ if (allow_active && link->psr_settings.psr_allow_active != *allow_active) {
+ link->psr_settings.psr_allow_active = *allow_active;
+
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (!allow_active)
- dc_z10_restore(dc);
+ if (!link->psr_settings.psr_allow_active)
+ dc_z10_restore(dc);
#endif
- if (psr != NULL && link->psr_settings.psr_feature_enabled) {
- if (force_static && psr->funcs->psr_force_static)
- psr->funcs->psr_force_static(psr, panel_inst);
- psr->funcs->psr_enable(psr, allow_active, wait, panel_inst);
- } else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_settings.psr_feature_enabled)
- dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
- else
- return false;
+ if (psr != NULL && link->psr_settings.psr_feature_enabled) {
+ if (force_static && psr->funcs->psr_force_static)
+ psr->funcs->psr_force_static(psr, panel_inst);
+ psr->funcs->psr_enable(psr, link->psr_settings.psr_allow_active, wait, panel_inst);
+ } else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) &&
+ link->psr_settings.psr_feature_enabled)
+ dmcu->funcs->set_psr_enable(dmcu, link->psr_settings.psr_allow_active, wait);
+ else
+ return false;
+ }
return true;
}
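Note: with the new signature, dc_link_set_psr_allow_active() takes pointers so a caller can change the allow_active state, the PSR power-optimization flags, or both in one call; a NULL argument leaves that field untouched (the clk_mgr and dc_set_psr_allow_active() hunks earlier in this diff use exactly this pattern). A short illustrative sketch of the calling convention (function name and values are hypothetical):

static void example_psr_calls(struct dc_link *link)
{
        bool allow_active = false;
        unsigned int power_opts = 0;  /* hypothetical flag value */

        /* disable PSR, leave the power-optimization flags alone */
        dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL);

        /* update only the power-optimization flags */
        dc_link_set_psr_allow_active(link, NULL, false, false, &power_opts);
}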
@@ -3272,10 +3256,12 @@ static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
static void update_mst_stream_alloc_table(
struct dc_link *link,
struct stream_encoder *stream_enc,
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct hpo_dp_stream_encoder *hpo_dp_stream_enc, // TODO: Rename stream_enc to dio_stream_enc?
+#endif
const struct dp_mst_stream_allocation_table *proposed_table)
{
- struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = {
- { 0 } };
+ struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = { 0 };
struct link_mst_stream_allocation *dc_alloc;
int i;
@@ -3308,6 +3294,9 @@ static void update_mst_stream_alloc_table(
work_table[i].slot_count =
proposed_table->stream_allocations[i].slot_count;
work_table[i].stream_enc = stream_enc;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ work_table[i].hpo_dp_stream_enc = hpo_dp_stream_enc;
+#endif
}
}
@@ -3430,11 +3419,15 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
struct dc_link *link = stream->link;
struct link_encoder *link_encoder = NULL;
struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc;
+ struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;
+#endif
struct dp_mst_stream_allocation_table proposed_table = {0};
struct fixed31_32 avg_time_slots_per_mtp;
struct fixed31_32 pbn;
struct fixed31_32 pbn_per_slot;
- uint8_t i;
+ int i;
enum act_return_status ret;
DC_LOGGER_INIT(link->ctx->logger);
@@ -3457,7 +3450,14 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
&proposed_table,
true)) {
update_mst_stream_alloc_table(
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+#else
link, pipe_ctx->stream_res.stream_enc, &proposed_table);
+#endif
}
else
DC_LOG_WARNING("Failed to update"
@@ -3471,23 +3471,70 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
link->mst_stream_alloc_table.stream_count);
for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].hpo_dp_stream_enc: %p "
"stream[%d].vcp_id: %d "
"stream[%d].slot_count: %d\n",
i,
(void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
+ i,
link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
i,
link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+#else
+ DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+#endif
}
ASSERT(proposed_table.stream_count > 0);
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ static enum dc_status status;
+ uint8_t mst_alloc_slots = 0, prev_mst_slots_in_use = 0xFF;
+
+ for (i = 0; i < link->mst_stream_alloc_table.stream_count; i++)
+ mst_alloc_slots += link->mst_stream_alloc_table.stream_allocations[i].slot_count;
+
+ status = dc_process_dmub_set_mst_slots(link->dc, link->link_index,
+ mst_alloc_slots, &prev_mst_slots_in_use);
+ ASSERT(status == DC_OK);
+ DC_LOG_MST("dpia : status[%d]: alloc_slots[%d]: used_slots[%d]\n",
+ status, mst_alloc_slots, prev_mst_slots_in_use);
+ }
+
/* program DP source TX for payload */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ switch (dp_get_link_encoding_format(&link->cur_link_settings)) {
+ case DP_8b_10b_ENCODING:
+ link_encoder->funcs->update_mst_stream_allocation_table(
+ link_encoder,
+ &link->mst_stream_alloc_table);
+ break;
+ case DP_128b_132b_ENCODING:
+ hpo_dp_link_encoder->funcs->update_stream_allocation_table(
+ hpo_dp_link_encoder,
+ &link->mst_stream_alloc_table);
+ break;
+ case DP_UNKNOWN_ENCODING:
+ DC_LOG_ERROR("Failure: unknown encoding format\n");
+ return DC_ERROR_UNEXPECTED;
+ }
+#else
link_encoder->funcs->update_mst_stream_allocation_table(
link_encoder,
&link->mst_stream_alloc_table);
+#endif
/* send down message */
ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
@@ -3510,23 +3557,205 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
pbn = get_pbn_from_timing(pipe_ctx);
avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ switch (dp_get_link_encoding_format(&link->cur_link_settings)) {
+ case DP_8b_10b_ENCODING:
+ stream_encoder->funcs->set_throttled_vcp_size(
+ stream_encoder,
+ avg_time_slots_per_mtp);
+ break;
+ case DP_128b_132b_ENCODING:
+ hpo_dp_link_encoder->funcs->set_throttled_vcp_size(
+ hpo_dp_link_encoder,
+ hpo_dp_stream_encoder->inst,
+ avg_time_slots_per_mtp);
+ break;
+ case DP_UNKNOWN_ENCODING:
+ DC_LOG_ERROR("Failure: unknown encoding format\n");
+ return DC_ERROR_UNEXPECTED;
+ }
+#else
stream_encoder->funcs->set_throttled_vcp_size(
stream_encoder,
avg_time_slots_per_mtp);
+#endif
return DC_OK;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct fixed31_32 avg_time_slots_per_mtp;
+ struct fixed31_32 pbn;
+ struct fixed31_32 pbn_per_slot;
+ struct link_encoder *link_encoder = link->link_enc;
+ struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
+ struct dp_mst_stream_allocation_table proposed_table = {0};
+ uint8_t i;
+ enum act_return_status ret;
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ /* decrease throttled vcp size */
+ pbn_per_slot = get_pbn_per_slot(stream);
+ pbn = get_pbn_from_bw_in_kbps(bw_in_kbps);
+ avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
+
+ stream_encoder->funcs->set_throttled_vcp_size(
+ stream_encoder,
+ avg_time_slots_per_mtp);
+
+ /* send ALLOCATE_PAYLOAD sideband message with updated pbn */
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ true);
+
+ /* notify immediate branch device table update */
+ if (dm_helpers_dp_mst_write_payload_allocation_table(
+ stream->ctx,
+ stream,
+ &proposed_table,
+ true)) {
+ /* update mst stream allocation table software state */
+ update_mst_stream_alloc_table(
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+ } else {
+ DC_LOG_WARNING("Failed to update"
+ "MST allocation table for"
+ "pipe idx:%d\n",
+ pipe_ctx->pipe_idx);
+ }
+
+ DC_LOG_MST("%s "
+ "stream_count: %d: \n ",
+ __func__,
+ link->mst_stream_alloc_table.stream_count);
+
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+ DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+ }
+
+ ASSERT(proposed_table.stream_count > 0);
+
+ /* update mst stream allocation table hardware state */
+ link_encoder->funcs->update_mst_stream_allocation_table(
+ link_encoder,
+ &link->mst_stream_alloc_table);
+
+ /* poll for immediate branch device ACT handled */
+ ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ stream->ctx,
+ stream);
+
+ return DC_OK;
+}
+
+enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct fixed31_32 avg_time_slots_per_mtp;
+ struct fixed31_32 pbn;
+ struct fixed31_32 pbn_per_slot;
+ struct link_encoder *link_encoder = link->link_enc;
+ struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
+ struct dp_mst_stream_allocation_table proposed_table = {0};
+ uint8_t i;
+ enum act_return_status ret;
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ /* notify immediate branch device table update */
+ if (dm_helpers_dp_mst_write_payload_allocation_table(
+ stream->ctx,
+ stream,
+ &proposed_table,
+ true)) {
+ /* update mst stream allocation table software state */
+ update_mst_stream_alloc_table(
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+ }
+
+ DC_LOG_MST("%s "
+ "stream_count: %d: \n ",
+ __func__,
+ link->mst_stream_alloc_table.stream_count);
+
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+ DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+ }
+
+ ASSERT(proposed_table.stream_count > 0);
+
+ /* update mst stream allocation table hardware state */
+ link_encoder->funcs->update_mst_stream_allocation_table(
+ link_encoder,
+ &link->mst_stream_alloc_table);
+
+ /* poll for immediate branch device ACT handled */
+ ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ stream->ctx,
+ stream);
+
+ if (ret != ACT_LINK_LOST) {
+ /* send ALLOCATE_PAYLOAD sideband message with updated pbn */
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ true);
+ }
+
+ /* increase throttled vcp size */
+ pbn = get_pbn_from_bw_in_kbps(bw_in_kbps);
+ pbn_per_slot = get_pbn_per_slot(stream);
+ avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
+
+ stream_encoder->funcs->set_throttled_vcp_size(
+ stream_encoder,
+ avg_time_slots_per_mtp);
+
+ return DC_OK;
+}
+#endif
+
static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
struct link_encoder *link_encoder = NULL;
struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc;
+ struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;
+#endif
struct dp_mst_stream_allocation_table proposed_table = {0};
struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
- uint8_t i;
+ int i;
bool mst_mode = (link->type == dc_connection_mst_branch);
DC_LOGGER_INIT(link->ctx->logger);
@@ -3545,9 +3774,28 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
*/
/* slot X.Y */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ switch (dp_get_link_encoding_format(&link->cur_link_settings)) {
+ case DP_8b_10b_ENCODING:
+ stream_encoder->funcs->set_throttled_vcp_size(
+ stream_encoder,
+ avg_time_slots_per_mtp);
+ break;
+ case DP_128b_132b_ENCODING:
+ hpo_dp_link_encoder->funcs->set_throttled_vcp_size(
+ hpo_dp_link_encoder,
+ hpo_dp_stream_encoder->inst,
+ avg_time_slots_per_mtp);
+ break;
+ case DP_UNKNOWN_ENCODING:
+ DC_LOG_ERROR("Failure: unknown encoding format\n");
+ return DC_ERROR_UNEXPECTED;
+ }
+#else
stream_encoder->funcs->set_throttled_vcp_size(
stream_encoder,
avg_time_slots_per_mtp);
+#endif
/* TODO: which component is responsible for remove payload table? */
if (mst_mode) {
@@ -3557,8 +3805,16 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
&proposed_table,
false)) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ update_mst_stream_alloc_table(
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+#else
update_mst_stream_alloc_table(
link, pipe_ctx->stream_res.stream_enc, &proposed_table);
+#endif
}
else {
DC_LOG_WARNING("Failed to update"
@@ -3574,6 +3830,20 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
link->mst_stream_alloc_table.stream_count);
for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].hpo_dp_stream_enc: %p "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+#else
DC_LOG_MST("stream_enc[%d]: %p "
"stream[%d].vcp_id: %d "
"stream[%d].slot_count: %d\n",
@@ -3583,11 +3853,44 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
i,
link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+#endif
}
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ enum dc_status status;
+ uint8_t mst_alloc_slots = 0, prev_mst_slots_in_use = 0xFF;
+
+ for (i = 0; i < link->mst_stream_alloc_table.stream_count; i++)
+ mst_alloc_slots += link->mst_stream_alloc_table.stream_allocations[i].slot_count;
+
+ status = dc_process_dmub_set_mst_slots(link->dc, link->link_index,
+ mst_alloc_slots, &prev_mst_slots_in_use);
+ ASSERT(status != DC_NOT_SUPPORTED);
+ DC_LOG_MST("dpia : status[%d]: alloc_slots[%d]: used_slots[%d]\n",
+ status, mst_alloc_slots, prev_mst_slots_in_use);
+ }
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ switch (dp_get_link_encoding_format(&link->cur_link_settings)) {
+ case DP_8b_10b_ENCODING:
+ link_encoder->funcs->update_mst_stream_allocation_table(
+ link_encoder,
+ &link->mst_stream_alloc_table);
+ break;
+ case DP_128b_132b_ENCODING:
+ hpo_dp_link_encoder->funcs->update_stream_allocation_table(
+ hpo_dp_link_encoder,
+ &link->mst_stream_alloc_table);
+ break;
+ case DP_UNKNOWN_ENCODING:
+ DC_LOG_ERROR("Failure: unknown encoding format\n");
+ return DC_ERROR_UNEXPECTED;
+ }
+#else
link_encoder->funcs->update_mst_stream_allocation_table(
link_encoder,
&link->mst_stream_alloc_table);
+#endif
if (mst_mode) {
dm_helpers_dp_mst_poll_for_allocation_change_trigger(
@@ -3610,6 +3913,9 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct link_encoder *link_enc = NULL;
+ struct dc_state *state = pipe_ctx->stream->ctx->dc->current_state;
+ struct link_enc_assignment link_enc_assign;
+ int i;
#endif
if (cp_psp && cp_psp->funcs.update_stream_config) {
@@ -3623,9 +3929,72 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
#if defined(CONFIG_DRM_AMD_DC_DCN)
config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
- if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY) {
+
+ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY ||
+ pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
link_enc = pipe_ctx->stream->link->link_enc;
+ config.dio_output_type = pipe_ctx->stream->link->ep_type;
+ config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY)
+ link_enc = pipe_ctx->stream->link->link_enc;
+ else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) {
+ link_enc = link_enc_cfg_get_link_enc_used_by_stream(
+ pipe_ctx->stream->ctx->dc,
+ pipe_ctx->stream);
+ }
+ // Initialize PHY ID with ABCDE - 01234 mapping except when it is B0
config.phy_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+
+ //look up the link_enc_assignment for the current pipe_ctx
+ for (i = 0; i < state->stream_count; i++) {
+ if (pipe_ctx->stream == state->streams[i]) {
+ link_enc_assign = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
+ }
+ }
+ // Add flag to guard new A0 DIG mapping
+ if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true) {
+ config.dig_be = link_enc_assign.eng_id;
+ config.dio_output_type = pipe_ctx->stream->link->ep_type;
+ config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+ } else {
+ config.dio_output_type = 0;
+ config.dio_output_idx = 0;
+ }
+
+ // Add flag to guard B0 implementation
+ if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true &&
+ link_enc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ link_enc = link_enc_assign.stream->link_enc;
+
+ // enum ID 1-4 maps to DPIA PHY ID 0-3
+ config.phy_idx = link_enc_assign.ep_id.link_id.enum_id - ENUM_ID_1;
+ } else { // for non DPIA mode over B0, ABCDE maps to 01564
+
+ switch (link_enc->transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ config.phy_idx = 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ config.phy_idx = 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ config.phy_idx = 5;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ config.phy_idx = 6;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ config.phy_idx = 4;
+ break;
+ default:
+ config.phy_idx = 0;
+ break;
+ }
+
+ }
+ }
} else if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) {
link_enc = link_enc_cfg_get_link_enc_used_by_stream(
pipe_ctx->stream->ctx->dc,
@@ -3910,6 +4279,8 @@ void core_link_enable_stream(
*/
if (status != DC_FAIL_DP_LINK_TRAINING ||
pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ if (false == stream->link->link_status.link_active)
+ disable_link(stream->link, pipe_ctx->stream->signal);
BREAK_TO_DEBUGGER();
return;
}
@@ -4399,7 +4770,7 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
timing->dsc_cfg.bits_per_pixel,
timing->dsc_cfg.num_slices_h,
timing->dsc_cfg.is_dp);
-#endif
+#endif /* CONFIG_DRM_AMD_DC_DCN */
switch (timing->display_color_depth) {
case COLOR_DEPTH_666:
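Note: the Yellow Carp B0 branch of update_psp_stream_config() above maps UNIPHY A/B/C/D/E to PHY indices 0/1/5/6/4 for non-DPIA endpoints, while DPIA endpoints map link enum IDs 1-4 straight to PHYs 0-3. The same non-DPIA mapping could also be expressed as a lookup table; a hedged sketch with the same values as the switch, assuming the UNIPHY enum values are contiguous as in the dc headers:

/* Yellow Carp B0, non-DPIA: UNIPHY A,B,C,D,E -> PHY 0,1,5,6,4 */
static uint8_t b0_phy_idx_for_transmitter(enum transmitter tx)
{
        static const uint8_t map[] = {
                [TRANSMITTER_UNIPHY_A] = 0,
                [TRANSMITTER_UNIPHY_B] = 1,
                [TRANSMITTER_UNIPHY_C] = 5,
                [TRANSMITTER_UNIPHY_D] = 6,
                [TRANSMITTER_UNIPHY_E] = 4,
        };

        if (tx >= TRANSMITTER_UNIPHY_A && tx <= TRANSMITTER_UNIPHY_E)
                return map[tx];
        return 0;  /* same default as the switch in the patch */
}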
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index b0f1cd7268c8..60539b1f2a80 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -554,6 +554,7 @@ bool dal_ddc_service_query_ddc_data(
payload.address = address;
payload.reply = NULL;
payload.defer_delay = get_defer_delay(ddc);
+ payload.write_status_update = false;
if (write_size != 0) {
payload.write = true;
@@ -625,24 +626,24 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
do {
struct aux_payload current_payload;
bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >=
- payload->length;
+ payload->length ? true : false;
+ uint32_t payload_length = is_end_of_payload ?
+ payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE;
current_payload.address = payload->address;
current_payload.data = &payload->data[retrieved];
current_payload.defer_delay = payload->defer_delay;
current_payload.i2c_over_aux = payload->i2c_over_aux;
- current_payload.length = is_end_of_payload ?
- payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE;
- /* set mot (middle of transaction) to false
- * if it is the last payload
- */
+ current_payload.length = payload_length;
+ /* set mot (middle of transaction) to false if it is the last payload */
current_payload.mot = is_end_of_payload ? payload->mot:true;
+ current_payload.write_status_update = false;
current_payload.reply = payload->reply;
current_payload.write = payload->write;
ret = dc_link_aux_transfer_with_retries(ddc, &current_payload);
- retrieved += current_payload.length;
+ retrieved += payload_length;
} while (retrieved < payload->length && ret == true);
return ret;
@@ -763,7 +764,7 @@ void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service)
dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
sizeof(offset), &tmds_config, sizeof(tmds_config));
if (tmds_config & 0x1) {
- union hdmi_scdc_status_flags_data status_data = { {0} };
+ union hdmi_scdc_status_flags_data status_data = {0};
uint8_t scramble_status = 0;
offset = HDMI_SCDC_SCRAMBLER_STATUS;
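Note: the reworked loop in dal_ddc_submit_aux_command() above still splits a long payload into chunks of DEFAULT_AUX_MAX_DATA_SIZE bytes (DP AUX caps a single transfer at 16 bytes of data) and keeps MOT (middle of transaction) set on every chunk except the last; the change hoists the chunk length into payload_length so the retrieved counter cannot drift. A standalone sketch of the chunking rule, with the constant and printout purely illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define AUX_MAX_DATA_SIZE 16  /* assumption: mirrors DEFAULT_AUX_MAX_DATA_SIZE */

static void submit_in_chunks(uint32_t total_length, bool final_mot)
{
        uint32_t retrieved = 0;

        while (retrieved < total_length) {
                bool is_last = (retrieved + AUX_MAX_DATA_SIZE) >= total_length;
                uint32_t len = is_last ? total_length - retrieved : AUX_MAX_DATA_SIZE;
                bool mot = is_last ? final_mot : true;  /* keep MOT until the last chunk */

                printf("chunk @%u len=%u mot=%d\n", retrieved, len, mot);
                retrieved += len;
        }
}

int main(void)
{
        submit_in_chunks(40, false);  /* 16 + 16 + 8; MOT cleared on the final chunk */
        return 0;
}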
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 54662d74c65a..cb7bf9148904 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -106,6 +106,10 @@ static bool decide_fallback_link_setting(
static struct dc_link_settings get_common_supported_link_settings(
struct dc_link_settings link_setting_a,
struct dc_link_settings link_setting_b);
+static void maximize_lane_settings(const struct link_training_settings *lt_settings,
+ struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]);
+static void override_lane_settings(const struct link_training_settings *lt_settings,
+ struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]);
static uint32_t get_cr_training_aux_rd_interval(struct dc_link *link,
const struct dc_link_settings *link_settings)
@@ -259,7 +263,7 @@ static void dpcd_set_training_pattern(
struct dc_link *link,
enum dc_dp_training_pattern training_pattern)
{
- union dpcd_training_pattern dpcd_pattern = { {0} };
+ union dpcd_training_pattern dpcd_pattern = {0};
dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
dc_dp_training_pattern_to_dpcd_training_pattern(
@@ -401,8 +405,8 @@ enum dc_status dpcd_set_link_settings(
uint8_t rate;
enum dc_status status;
- union down_spread_ctrl downspread = { {0} };
- union lane_count_set lane_count_set = { {0} };
+ union down_spread_ctrl downspread = {0};
+ union lane_count_set lane_count_set = {0};
downspread.raw = (uint8_t)
(lt_settings->link_settings.link_spread);
@@ -515,12 +519,10 @@ static void dpcd_set_lt_pattern_and_lane_settings(
enum dc_dp_training_pattern pattern,
uint32_t offset)
{
- union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = { { {0} } };
-
uint32_t dpcd_base_lt_offset;
uint8_t dpcd_lt_buffer[5] = {0};
- union dpcd_training_pattern dpcd_pattern = { {0} };
+ union dpcd_training_pattern dpcd_pattern = { 0 };
uint32_t size_in_bytes;
bool edp_workaround = false; /* TODO link_prop.INTERNAL */
dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET;
@@ -554,16 +556,14 @@ static void dpcd_set_lt_pattern_and_lane_settings(
dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
}
- dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->lane_settings, dpcd_lane);
-
/* concatenate everything into one buffer*/
-
- size_in_bytes = lt_settings->link_settings.lane_count * sizeof(dpcd_lane[0]);
+ size_in_bytes = lt_settings->link_settings.lane_count *
+ sizeof(lt_settings->dpcd_lane_settings[0]);
// 0x00103 - 0x00102
memmove(
&dpcd_lt_buffer[DP_TRAINING_LANE0_SET - DP_TRAINING_PATTERN_SET],
- dpcd_lane,
+ lt_settings->dpcd_lane_settings,
size_in_bytes);
if (is_repeater(link, offset)) {
@@ -575,7 +575,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
__func__,
offset,
dpcd_base_lt_offset,
- dpcd_lane[0].tx_ffe.PRESET_VALUE);
+ lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
DP_8b_10b_ENCODING)
#endif
@@ -584,10 +584,10 @@ static void dpcd_set_lt_pattern_and_lane_settings(
__func__,
offset,
dpcd_base_lt_offset,
- dpcd_lane[0].bits.VOLTAGE_SWING_SET,
- dpcd_lane[0].bits.PRE_EMPHASIS_SET,
- dpcd_lane[0].bits.MAX_SWING_REACHED,
- dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
+ lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
+ lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
+ lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
} else {
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
@@ -595,17 +595,17 @@ static void dpcd_set_lt_pattern_and_lane_settings(
DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
__func__,
dpcd_base_lt_offset,
- dpcd_lane[0].tx_ffe.PRESET_VALUE);
+ lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
DP_8b_10b_ENCODING)
#endif
DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
dpcd_base_lt_offset,
- dpcd_lane[0].bits.VOLTAGE_SWING_SET,
- dpcd_lane[0].bits.PRE_EMPHASIS_SET,
- dpcd_lane[0].bits.MAX_SWING_REACHED,
- dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
+ lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
+ lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
+ lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
}
if (edp_workaround) {
/* for eDP write in 2 parts because the 5-byte burst is
@@ -620,7 +620,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
core_link_write_dpcd(
link,
DP_TRAINING_LANE0_SET,
- (uint8_t *)(dpcd_lane),
+ (uint8_t *)(lt_settings->dpcd_lane_settings),
size_in_bytes);
#if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -711,34 +711,44 @@ void dp_hw_to_dpcd_lane_settings(
}
}
-void dp_update_drive_settings(
- struct link_training_settings *dest,
- struct link_training_settings src)
+void dp_decide_lane_settings(
+ const struct link_training_settings *lt_settings,
+ const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
+ struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
+ union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
{
uint32_t lane;
- for (lane = 0; lane < src.link_settings.lane_count; lane++) {
- if (dest->voltage_swing == NULL)
- dest->lane_settings[lane].VOLTAGE_SWING = src.lane_settings[lane].VOLTAGE_SWING;
- else
- dest->lane_settings[lane].VOLTAGE_SWING = *dest->voltage_swing;
-
- if (dest->pre_emphasis == NULL)
- dest->lane_settings[lane].PRE_EMPHASIS = src.lane_settings[lane].PRE_EMPHASIS;
- else
- dest->lane_settings[lane].PRE_EMPHASIS = *dest->pre_emphasis;
-
- if (dest->post_cursor2 == NULL)
- dest->lane_settings[lane].POST_CURSOR2 = src.lane_settings[lane].POST_CURSOR2;
- else
- dest->lane_settings[lane].POST_CURSOR2 = *dest->post_cursor2;
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING) {
+ hw_lane_settings[lane].VOLTAGE_SWING =
+ (enum dc_voltage_swing)(ln_adjust[lane].bits.
+ VOLTAGE_SWING_LANE);
+ hw_lane_settings[lane].PRE_EMPHASIS =
+ (enum dc_pre_emphasis)(ln_adjust[lane].bits.
+ PRE_EMPHASIS_LANE);
+ }
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (dest->ffe_preset == NULL)
- dest->lane_settings[lane].FFE_PRESET = src.lane_settings[lane].FFE_PRESET;
- else
- dest->lane_settings[lane].FFE_PRESET = *dest->ffe_preset;
+ else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING) {
+ hw_lane_settings[lane].FFE_PRESET.raw =
+ ln_adjust[lane].tx_ffe.PRESET_VALUE;
+ }
#endif
}
+ dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings);
+
+ if (lt_settings->disallow_per_lane_settings) {
+ /* we find the maximum of the requested settings across all lanes*/
+ /* and set this maximum for all lanes*/
+ maximize_lane_settings(lt_settings, hw_lane_settings);
+ override_lane_settings(lt_settings, hw_lane_settings);
+
+ if (lt_settings->always_match_dpcd_with_hw_lane_settings)
+ dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings);
+ }
+
}
static uint8_t get_nibble_at_index(const uint8_t *buf,
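Note: dp_decide_lane_settings() above first derives per-lane VS/PE (8b/10b) or FFE presets (128b/132b) from the receiver's lane_adjust requests, and when disallow_per_lane_settings is set it flattens them: the hunks that follow rework find_max_drive_settings() into maximize_lane_settings()/override_lane_settings(), which take the largest requested value across the active lanes and program it on every lane. A simplified standalone sketch of that maximize step (toy types, without the swing/pre-emphasis clamping the driver also applies):

#include <stddef.h>

#define MAX_LANES 4

struct lane_drive {
        int voltage_swing;
        int pre_emphasis;
};

static void maximize_lanes(struct lane_drive lanes[MAX_LANES], size_t active_lanes)
{
        struct lane_drive max = lanes[0];
        size_t i;

        /* find the largest request across the active lanes */
        for (i = 1; i < active_lanes; i++) {
                if (lanes[i].voltage_swing > max.voltage_swing)
                        max.voltage_swing = lanes[i].voltage_swing;
                if (lanes[i].pre_emphasis > max.pre_emphasis)
                        max.pre_emphasis = lanes[i].pre_emphasis;
        }

        /* apply the same drive settings to every lane */
        for (i = 0; i < MAX_LANES; i++)
                lanes[i] = max;
}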
@@ -768,55 +778,29 @@ static enum dc_pre_emphasis get_max_pre_emphasis_for_voltage_swing(
}
-static void find_max_drive_settings(
- const struct link_training_settings *link_training_setting,
- struct link_training_settings *max_lt_setting)
+static void maximize_lane_settings(const struct link_training_settings *lt_settings,
+ struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
{
uint32_t lane;
struct dc_lane_settings max_requested;
- max_requested.VOLTAGE_SWING =
- link_training_setting->
- lane_settings[0].VOLTAGE_SWING;
- max_requested.PRE_EMPHASIS =
- link_training_setting->
- lane_settings[0].PRE_EMPHASIS;
- /*max_requested.postCursor2 =
- * link_training_setting->laneSettings[0].postCursor2;*/
+ max_requested.VOLTAGE_SWING = lane_settings[0].VOLTAGE_SWING;
+ max_requested.PRE_EMPHASIS = lane_settings[0].PRE_EMPHASIS;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- max_requested.FFE_PRESET =
- link_training_setting->lane_settings[0].FFE_PRESET;
+ max_requested.FFE_PRESET = lane_settings[0].FFE_PRESET;
#endif
/* Determine what the maximum of the requested settings are*/
- for (lane = 1; lane < link_training_setting->link_settings.lane_count;
- lane++) {
- if (link_training_setting->lane_settings[lane].VOLTAGE_SWING >
- max_requested.VOLTAGE_SWING)
+ for (lane = 1; lane < lt_settings->link_settings.lane_count; lane++) {
+ if (lane_settings[lane].VOLTAGE_SWING > max_requested.VOLTAGE_SWING)
+ max_requested.VOLTAGE_SWING = lane_settings[lane].VOLTAGE_SWING;
- max_requested.VOLTAGE_SWING =
- link_training_setting->
- lane_settings[lane].VOLTAGE_SWING;
-
- if (link_training_setting->lane_settings[lane].PRE_EMPHASIS >
- max_requested.PRE_EMPHASIS)
- max_requested.PRE_EMPHASIS =
- link_training_setting->
- lane_settings[lane].PRE_EMPHASIS;
-
- /*
- if (link_training_setting->laneSettings[lane].postCursor2 >
- max_requested.postCursor2)
- {
- max_requested.postCursor2 =
- link_training_setting->laneSettings[lane].postCursor2;
- }
- */
+ if (lane_settings[lane].PRE_EMPHASIS > max_requested.PRE_EMPHASIS)
+ max_requested.PRE_EMPHASIS = lane_settings[lane].PRE_EMPHASIS;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (link_training_setting->lane_settings[lane].FFE_PRESET.settings.level >
+ if (lane_settings[lane].FFE_PRESET.settings.level >
max_requested.FFE_PRESET.settings.level)
max_requested.FFE_PRESET.settings.level =
- link_training_setting->
lane_settings[lane].FFE_PRESET.settings.level;
#endif
}
@@ -828,10 +812,6 @@ static void find_max_drive_settings(
if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL)
max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL;
- /*
- if (max_requested.postCursor2 > PostCursor2_MaxLevel)
- max_requested.postCursor2 = PostCursor2_MaxLevel;
- */
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (max_requested.FFE_PRESET.settings.level > DP_FFE_PRESET_MAX_LEVEL)
max_requested.FFE_PRESET.settings.level = DP_FFE_PRESET_MAX_LEVEL;
@@ -845,61 +825,58 @@ static void find_max_drive_settings(
get_max_pre_emphasis_for_voltage_swing(
max_requested.VOLTAGE_SWING);
- /*
- * Post Cursor2 levels are completely independent from
- * pre-emphasis (Post Cursor1) levels. But Post Cursor2 levels
- * can only be applied to each allowable combination of voltage
- * swing and pre-emphasis levels */
- /* if ( max_requested.postCursor2 >
- * getMaxPostCursor2ForVoltageSwing(max_requested.voltageSwing))
- * max_requested.postCursor2 =
- * getMaxPostCursor2ForVoltageSwing(max_requested.voltageSwing);
- */
-
- max_lt_setting->link_settings.link_rate =
- link_training_setting->link_settings.link_rate;
- max_lt_setting->link_settings.lane_count =
- link_training_setting->link_settings.lane_count;
- max_lt_setting->link_settings.link_spread =
- link_training_setting->link_settings.link_spread;
-
- for (lane = 0; lane <
- link_training_setting->link_settings.lane_count;
- lane++) {
- max_lt_setting->lane_settings[lane].VOLTAGE_SWING =
- max_requested.VOLTAGE_SWING;
- max_lt_setting->lane_settings[lane].PRE_EMPHASIS =
- max_requested.PRE_EMPHASIS;
- /*max_lt_setting->laneSettings[lane].postCursor2 =
- * max_requested.postCursor2;
- */
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ lane_settings[lane].VOLTAGE_SWING = max_requested.VOLTAGE_SWING;
+ lane_settings[lane].PRE_EMPHASIS = max_requested.PRE_EMPHASIS;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- max_lt_setting->lane_settings[lane].FFE_PRESET =
- max_requested.FFE_PRESET;
+ lane_settings[lane].FFE_PRESET = max_requested.FFE_PRESET;
#endif
}
+}
+
+static void override_lane_settings(const struct link_training_settings *lt_settings,
+ struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
+{
+ uint32_t lane;
+
+ if (lt_settings->voltage_swing == NULL &&
+ lt_settings->pre_emphasis == NULL &&
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ lt_settings->ffe_preset == NULL &&
+#endif
+ lt_settings->post_cursor2 == NULL)
+ return;
+
+ for (lane = 1; lane < LANE_COUNT_DP_MAX; lane++) {
+ if (lt_settings->voltage_swing)
+ lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing;
+ if (lt_settings->pre_emphasis)
+ lane_settings[lane].PRE_EMPHASIS = *lt_settings->pre_emphasis;
+ if (lt_settings->post_cursor2)
+ lane_settings[lane].POST_CURSOR2 = *lt_settings->post_cursor2;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (lt_settings->ffe_preset)
+ lane_settings[lane].FFE_PRESET = *lt_settings->ffe_preset;
+#endif
+ }
}
-enum dc_status dp_get_lane_status_and_drive_settings(
+enum dc_status dp_get_lane_status_and_lane_adjust(
struct dc_link *link,
const struct link_training_settings *link_training_setting,
- union lane_status *ln_status,
- union lane_align_status_updated *ln_status_updated,
- struct link_training_settings *req_settings,
+ union lane_status ln_status[LANE_COUNT_DP_MAX],
+ union lane_align_status_updated *ln_align,
+ union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
uint32_t offset)
{
unsigned int lane01_status_address = DP_LANE0_1_STATUS;
uint8_t lane_adjust_offset = 4;
unsigned int lane01_adjust_address;
uint8_t dpcd_buf[6] = {0};
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
- struct link_training_settings request_settings = { {0} };
uint32_t lane;
enum dc_status status;
- memset(req_settings, '\0', sizeof(struct link_training_settings));
-
if (is_repeater(link, offset)) {
lane01_status_address =
DP_LANE0_1_STATUS_PHY_REPEATER1 +
@@ -919,11 +896,11 @@ enum dc_status dp_get_lane_status_and_drive_settings(
ln_status[lane].raw =
get_nibble_at_index(&dpcd_buf[0], lane);
- dpcd_lane_adjust[lane].raw =
+ ln_adjust[lane].raw =
get_nibble_at_index(&dpcd_buf[lane_adjust_offset], lane);
}
- ln_status_updated->raw = dpcd_buf[2];
+ ln_align->raw = dpcd_buf[2];
if (is_repeater(link, offset)) {
DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
@@ -962,55 +939,6 @@ enum dc_status dp_get_lane_status_and_drive_settings(
dpcd_buf[lane_adjust_offset + 1]);
}
- /*copy to req_settings*/
- request_settings.link_settings.lane_count =
- link_training_setting->link_settings.lane_count;
- request_settings.link_settings.link_rate =
- link_training_setting->link_settings.link_rate;
- request_settings.link_settings.link_spread =
- link_training_setting->link_settings.link_spread;
-
- for (lane = 0; lane <
- (uint32_t)(link_training_setting->link_settings.lane_count);
- lane++) {
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
- DP_128b_132b_ENCODING) {
- request_settings.lane_settings[lane].FFE_PRESET.raw =
- dpcd_lane_adjust[lane].tx_ffe.PRESET_VALUE;
- } else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
- DP_8b_10b_ENCODING) {
- request_settings.lane_settings[lane].VOLTAGE_SWING =
- (enum dc_voltage_swing)(dpcd_lane_adjust[lane].bits.
- VOLTAGE_SWING_LANE);
- request_settings.lane_settings[lane].PRE_EMPHASIS =
- (enum dc_pre_emphasis)(dpcd_lane_adjust[lane].bits.
- PRE_EMPHASIS_LANE);
- }
-#else
- request_settings.lane_settings[lane].VOLTAGE_SWING =
- (enum dc_voltage_swing)(dpcd_lane_adjust[lane].bits.
- VOLTAGE_SWING_LANE);
- request_settings.lane_settings[lane].PRE_EMPHASIS =
- (enum dc_pre_emphasis)(dpcd_lane_adjust[lane].bits.
- PRE_EMPHASIS_LANE);
-#endif
- }
-
- /*Note: for postcursor2, read adjusted
- * postcursor2 settings from*/
- /*DpcdAddress_AdjustRequestPostCursor2 =
- *0x020C (not implemented yet)*/
-
- /* we find the maximum of the requested settings across all lanes*/
- /* and set this maximum for all lanes*/
- find_max_drive_settings(&request_settings, req_settings);
-
- /* if post cursor 2 is needed in the future,
- * read DpcdAddress_AdjustRequestPostCursor2 = 0x020C
- */
-
return status;
}
@@ -1019,7 +947,6 @@ enum dc_status dpcd_set_lane_settings(
const struct link_training_settings *link_training_setting,
uint32_t offset)
{
- union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
unsigned int lane0_set_address;
enum dc_status status;
@@ -1029,34 +956,11 @@ enum dc_status dpcd_set_lane_settings(
lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 +
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
- dp_hw_to_dpcd_lane_settings(link_training_setting,
- link_training_setting->lane_settings,
- dpcd_lane);
-
status = core_link_write_dpcd(link,
lane0_set_address,
- (uint8_t *)(dpcd_lane),
+ (uint8_t *)(link_training_setting->dpcd_lane_settings),
link_training_setting->link_settings.lane_count);
- /*
- if (LTSettings.link.rate == LinkRate_High2)
- {
- DpcdTrainingLaneSet2 dpcd_lane2[lane_count_DPMax] = {0};
- for ( uint32_t lane = 0;
- lane < lane_count_DPMax; lane++)
- {
- dpcd_lane2[lane].bits.post_cursor2_set =
- static_cast<unsigned char>(
- LTSettings.laneSettings[lane].postCursor2);
- dpcd_lane2[lane].bits.max_post_cursor2_reached = 0;
- }
- m_pDpcdAccessSrv->WriteDpcdData(
- DpcdAddress_Lane0Set2,
- reinterpret_cast<unsigned char*>(dpcd_lane2),
- LTSettings.link.lanes);
- }
- */
-
if (is_repeater(link, offset)) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
@@ -1066,7 +970,7 @@ enum dc_status dpcd_set_lane_settings(
__func__,
offset,
lane0_set_address,
- dpcd_lane[0].tx_ffe.PRESET_VALUE);
+ link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
DP_8b_10b_ENCODING)
#endif
@@ -1075,10 +979,10 @@ enum dc_status dpcd_set_lane_settings(
__func__,
offset,
lane0_set_address,
- dpcd_lane[0].bits.VOLTAGE_SWING_SET,
- dpcd_lane[0].bits.PRE_EMPHASIS_SET,
- dpcd_lane[0].bits.MAX_SWING_REACHED,
- dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
+ link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
+ link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
+ link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
} else {
#if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -1087,17 +991,17 @@ enum dc_status dpcd_set_lane_settings(
DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
__func__,
lane0_set_address,
- dpcd_lane[0].tx_ffe.PRESET_VALUE);
+ link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
DP_8b_10b_ENCODING)
#endif
DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
lane0_set_address,
- dpcd_lane[0].bits.VOLTAGE_SWING_SET,
- dpcd_lane[0].bits.PRE_EMPHASIS_SET,
- dpcd_lane[0].bits.MAX_SWING_REACHED,
- dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
+ link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
+ link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
+ link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
}
return status;
@@ -1110,7 +1014,7 @@ bool dp_is_max_vs_reached(
for (lane = 0; lane <
(uint32_t)(lt_settings->link_settings.lane_count);
lane++) {
- if (lt_settings->lane_settings[lane].VOLTAGE_SWING
+ if (lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET
== VOLTAGE_SWING_MAX_LEVEL)
return true;
}
@@ -1140,17 +1044,17 @@ static bool perform_post_lt_adj_req_sequence(
adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT;
adj_req_timer++) {
- struct link_training_settings req_settings;
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
union lane_align_status_updated
dpcd_lane_status_updated;
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
- dp_get_lane_status_and_drive_settings(
+ dp_get_lane_status_and_lane_adjust(
link,
lt_settings,
dpcd_lane_status,
&dpcd_lane_status_updated,
- &req_settings,
+ dpcd_lane_adjust,
DPRX);
if (dpcd_lane_status_updated.bits.
@@ -1168,11 +1072,10 @@ static bool perform_post_lt_adj_req_sequence(
for (lane = 0; lane < (uint32_t)(lane_count); lane++) {
if (lt_settings->
- lane_settings[lane].VOLTAGE_SWING !=
- req_settings.lane_settings[lane].
- VOLTAGE_SWING ||
- lt_settings->lane_settings[lane].PRE_EMPHASIS !=
- req_settings.lane_settings[lane].PRE_EMPHASIS) {
+ dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET !=
+ dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE ||
+ lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET !=
+ dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE) {
req_drv_setting_changed = true;
break;
@@ -1180,8 +1083,8 @@ static bool perform_post_lt_adj_req_sequence(
}
if (req_drv_setting_changed) {
- dp_update_drive_settings(
- lt_settings, req_settings);
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
dc_link_dp_set_drive_settings(link,
lt_settings);
@@ -1261,16 +1164,15 @@ static enum link_training_result perform_channel_equalization_sequence(
struct link_training_settings *lt_settings,
uint32_t offset)
{
- struct link_training_settings req_settings;
enum dc_dp_training_pattern tr_pattern;
uint32_t retries_ch_eq;
uint32_t wait_time_microsec;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
- union lane_align_status_updated dpcd_lane_status_updated = { {0} };
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
/* Note: also check that TPS4 is a supported feature*/
-
tr_pattern = lt_settings->pattern_for_eq;
#if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -1316,12 +1218,12 @@ static enum link_training_result perform_channel_equalization_sequence(
/* 4. Read lane status and requested
* drive settings as set by the sink*/
- dp_get_lane_status_and_drive_settings(
+ dp_get_lane_status_and_lane_adjust(
link,
lt_settings,
dpcd_lane_status,
&dpcd_lane_status_updated,
- &req_settings,
+ dpcd_lane_adjust,
offset);
/* 5. check CR done*/
@@ -1335,7 +1237,8 @@ static enum link_training_result perform_channel_equalization_sequence(
return LINK_TRAINING_SUCCESS;
/* 7. update VS/PE/PC2 in lt_settings*/
- dp_update_drive_settings(lt_settings, req_settings);
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
}
return LINK_TRAINING_EQ_FAIL_EQ;
@@ -1361,10 +1264,10 @@ static enum link_training_result perform_clock_recovery_sequence(
uint32_t retries_cr;
uint32_t retry_count;
uint32_t wait_time_microsec;
- struct link_training_settings req_settings;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
union lane_align_status_updated dpcd_lane_status_updated;
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
retries_cr = 0;
retry_count = 0;
@@ -1418,12 +1321,12 @@ static enum link_training_result perform_clock_recovery_sequence(
/* 4. Read lane status and requested drive
* settings as set by the sink
*/
- dp_get_lane_status_and_drive_settings(
+ dp_get_lane_status_and_lane_adjust(
link,
lt_settings,
dpcd_lane_status,
&dpcd_lane_status_updated,
- &req_settings,
+ dpcd_lane_adjust,
offset);
/* 5. check CR done*/
@@ -1441,33 +1344,25 @@ static enum link_training_result perform_clock_recovery_sequence(
break;
#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- if ((dp_get_link_encoding_format(&lt_settings->link_settings) == DP_128b_132b_ENCODING) &&
- lt_settings->lane_settings[0].FFE_PRESET.settings.level ==
- req_settings.lane_settings[0].FFE_PRESET.settings.level)
- retries_cr++;
- else if ((dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING) &&
- lt_settings->lane_settings[0].VOLTAGE_SWING ==
- req_settings.lane_settings[0].VOLTAGE_SWING)
- retries_cr++;
- else
- retries_cr = 0;
-#else
/* 7. same lane settings*/
/* Note: settings are the same for all lanes,
* so comparing first lane is sufficient*/
- if ((lt_settings->lane_settings[0].VOLTAGE_SWING ==
- req_settings.lane_settings[0].VOLTAGE_SWING)
- && (lt_settings->lane_settings[0].PRE_EMPHASIS ==
- req_settings.lane_settings[0].PRE_EMPHASIS))
+ if ((dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING) &&
+ lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
+ dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
+ retries_cr++;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if ((dp_get_link_encoding_format(&lt_settings->link_settings) == DP_128b_132b_ENCODING) &&
+ lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE ==
+ dpcd_lane_adjust[0].tx_ffe.PRESET_VALUE)
retries_cr++;
+#endif
else
retries_cr = 0;
-#endif
/* 8. update VS/PE/PC2 in lt_settings*/
- dp_update_drive_settings(lt_settings, req_settings);
-
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
retry_count++;
}
@@ -1487,7 +1382,7 @@ static inline enum link_training_result dp_transition_to_video_idle(
struct link_training_settings *lt_settings,
enum link_training_result status)
{
- union lane_count_set lane_count_set = { {0} };
+ union lane_count_set lane_count_set = {0};
/* 4. mainlink output idle pattern*/
dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
@@ -1596,6 +1491,9 @@ static inline void decide_8b_10b_training_settings(
lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting);
lt_settings->enhanced_framing = 1;
lt_settings->should_set_fec_ready = true;
+ lt_settings->disallow_per_lane_settings = true;
+ lt_settings->always_match_dpcd_with_hw_lane_settings = true;
+ dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -1621,6 +1519,9 @@ static inline void decide_128b_132b_training_settings(struct dc_link *link,
link->dpcd_caps.lttpr_caps.phy_repeater_cnt) + 1) * 20000;
lt_settings->lttpr_mode = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) ?
LTTPR_MODE_NON_TRANSPARENT : LTTPR_MODE_TRANSPARENT;
+ lt_settings->disallow_per_lane_settings = true;
+ dp_hw_to_dpcd_lane_settings(lt_settings,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
}
#endif
@@ -1661,7 +1562,13 @@ static void override_training_settings(
if (overrides->ffe_preset != NULL)
lt_settings->ffe_preset = overrides->ffe_preset;
#endif
-
+ /* Override HW lane settings with BIOS forced values if present */
+ if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN &&
+ link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING;
+ lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS;
+ lt_settings->always_match_dpcd_with_hw_lane_settings = false;
+ }
for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
lt_settings->lane_settings[lane].VOLTAGE_SWING =
lt_settings->voltage_swing != NULL ?
@@ -1677,6 +1584,9 @@ static void override_training_settings(
: POST_CURSOR2_DISABLED;
}
+ dp_hw_to_dpcd_lane_settings(lt_settings,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+
/* Initialize training timings */
if (overrides->cr_pattern_time != NULL)
lt_settings->cr_pattern_time = *overrides->cr_pattern_time;
@@ -1800,7 +1710,7 @@ static enum dc_status configure_lttpr_mode_non_transparent(
static void repeater_training_done(struct dc_link *link, uint32_t offset)
{
- union dpcd_training_pattern dpcd_pattern = { {0} };
+ union dpcd_training_pattern dpcd_pattern = {0};
const uint32_t dpcd_base_lt_offset =
DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
@@ -1947,6 +1857,9 @@ void dc_link_dp_set_drive_settings(
/* program ASIC PHY settings*/
dp_set_hw_lane_settings(link, lt_settings, DPRX);
+ dp_hw_to_dpcd_lane_settings(lt_settings,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+
/* Notify DP sink the PHY settings from source */
dpcd_set_lane_settings(link, lt_settings, DPRX);
}
@@ -2074,38 +1987,43 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
struct dc_link *link,
struct link_training_settings *lt_settings)
{
- uint8_t loop_count = 0;
+ uint8_t loop_count;
uint32_t aux_rd_interval = 0;
uint32_t wait_time = 0;
- struct link_training_settings req_settings;
- union lane_align_status_updated dpcd_lane_status_updated = { {0} };
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
enum link_training_result status = LINK_TRAINING_SUCCESS;
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
- /* Transmit 128b/132b_TPS1 over Main-Link and Set TRAINING_PATTERN_SET to 01h */
+ /* Transmit 128b/132b_TPS1 over Main-Link */
dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, DPRX);
+ /* Set TRAINING_PATTERN_SET to 01h */
dpcd_set_training_pattern(link, lt_settings->pattern_for_cr);
- /* Adjust TX_FFE_PRESET_VALUE as requested */
- dp_get_lane_status_and_drive_settings(link, lt_settings, dpcd_lane_status,
- &dpcd_lane_status_updated, &req_settings, DPRX);
- dp_update_drive_settings(lt_settings, req_settings);
+ /* Adjust TX_FFE_PRESET_VALUE and Transmit 128b/132b_TPS2 over Main-Link */
dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval);
+ dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+ &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
dp_set_hw_lane_settings(link, lt_settings, DPRX);
- dpcd_set_lane_settings(link, lt_settings, DPRX);
-
- /* Transmit 128b/132b_TPS2 over Main-Link and Set TRAINING_PATTERN_SET to 02h */
dp_set_hw_training_pattern(link, lt_settings->pattern_for_eq, DPRX);
- dpcd_set_training_pattern(link, lt_settings->pattern_for_eq);
+
+ /* Set loop counter to start from 1 */
+ loop_count = 1;
+
+ /* Set TRAINING_PATTERN_SET to 02h and TX_FFE_PRESET_VALUE in one AUX transaction */
+ dpcd_set_lt_pattern_and_lane_settings(link, lt_settings,
+ lt_settings->pattern_for_eq, DPRX);
/* poll for channel EQ done */
while (status == LINK_TRAINING_SUCCESS) {
- loop_count++;
dp_wait_for_training_aux_rd_interval(link, aux_rd_interval);
wait_time += aux_rd_interval;
- dp_get_lane_status_and_drive_settings(link, lt_settings, dpcd_lane_status,
- &dpcd_lane_status_updated, &req_settings, DPRX);
- dp_update_drive_settings(lt_settings, req_settings);
+ dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+ &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval);
if (dp_is_ch_eq_done(lt_settings->link_settings.lane_count,
dpcd_lane_status)) {
@@ -2119,6 +2037,7 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
dp_set_hw_lane_settings(link, lt_settings, DPRX);
dpcd_set_lane_settings(link, lt_settings, DPRX);
}
+ loop_count++;
}
/* poll for EQ interlane align done */
@@ -2134,8 +2053,8 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
dp_wait_for_training_aux_rd_interval(link,
lt_settings->eq_pattern_time);
wait_time += lt_settings->eq_pattern_time;
- dp_get_lane_status_and_drive_settings(link, lt_settings, dpcd_lane_status,
- &dpcd_lane_status_updated, &req_settings, DPRX);
+ dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+ &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
}
}
@@ -2148,9 +2067,9 @@ static enum link_training_result dp_perform_128b_132b_cds_done_sequence(
{
/* Assumption: assume hardware has transmitted eq pattern */
enum link_training_result status = LINK_TRAINING_SUCCESS;
- struct link_training_settings req_settings;
- union lane_align_status_updated dpcd_lane_status_updated = { {0} };
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
uint32_t wait_time = 0;
/* initiate CDS done sequence */
@@ -2161,8 +2080,8 @@ static enum link_training_result dp_perform_128b_132b_cds_done_sequence(
dp_wait_for_training_aux_rd_interval(link,
lt_settings->cds_pattern_time);
wait_time += lt_settings->cds_pattern_time;
- dp_get_lane_status_and_drive_settings(link, lt_settings, dpcd_lane_status,
- &dpcd_lane_status_updated, &req_settings, DPRX);
+ dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+ &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
if (dp_is_symbol_locked(lt_settings->link_settings.lane_count, dpcd_lane_status) &&
dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b) {
/* pass */
@@ -2219,7 +2138,7 @@ static enum link_training_result dp_perform_8b_10b_link_training(
}
for (lane = 0; lane < (uint8_t)lt_settings->link_settings.lane_count; lane++)
- lt_settings->lane_settings[lane].VOLTAGE_SWING = VOLTAGE_SWING_LEVEL0;
+ lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = VOLTAGE_SWING_LEVEL0;
}
if (status == LINK_TRAINING_SUCCESS) {
@@ -2863,7 +2782,7 @@ bool dp_verify_link_cap(
link->verified_link_cap = *known_limit_link_setting;
return true;
} else if (link->link_enc && link->dc->res_pool->funcs->link_encs_assign &&
- !link_enc_cfg_is_link_enc_avail(link->ctx->dc, link->link_enc->preferred_engine)) {
+ !link_enc_cfg_is_link_enc_avail(link->ctx->dc, link->link_enc->preferred_engine, link)) {
link->verified_link_cap = initial_link_settings;
return true;
}
@@ -3523,6 +3442,8 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
if (psr_error_status.bits.LINK_CRC_ERROR ||
psr_error_status.bits.RFB_STORAGE_ERROR ||
psr_error_status.bits.VSC_SDP_ERROR) {
+ bool allow_active;
+
/* Acknowledge and clear error bits */
dm_helpers_dp_write_dpcd(
link->ctx,
@@ -3532,8 +3453,10 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
sizeof(psr_error_status.raw));
/* PSR error, disable and re-enable PSR */
- dc_link_set_psr_allow_active(link, false, true, false);
- dc_link_set_psr_allow_active(link, true, true, false);
+ allow_active = false;
+ dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
+ allow_active = true;
+ dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
return true;
} else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
@@ -3591,15 +3514,13 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
#endif
unsigned int test_pattern_size = 0;
enum dp_test_pattern test_pattern;
- struct dc_link_training_settings link_settings;
union lane_adjust dpcd_lane_adjust;
unsigned int lane;
struct link_training_settings link_training_settings;
- int i = 0;
dpcd_test_pattern.raw = 0;
memset(dpcd_lane_adjustment, 0, sizeof(dpcd_lane_adjustment));
- memset(&link_settings, 0, sizeof(link_settings));
+ memset(&link_training_settings, 0, sizeof(link_training_settings));
/* get phy test pattern and pattern parameters from DP receiver */
core_link_read_dpcd(
@@ -3720,48 +3641,37 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
#endif
/* prepare link training settings */
- link_settings.link = link->cur_link_settings;
+ link_training_settings.link_settings = link->cur_link_settings;
for (lane = 0; lane <
(unsigned int)(link->cur_link_settings.lane_count);
lane++) {
dpcd_lane_adjust.raw =
get_nibble_at_index(&dpcd_lane_adjustment[0].raw, lane);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dp_get_link_encoding_format(&link->cur_link_settings) ==
- DP_128b_132b_ENCODING) {
- link_settings.lane_settings[lane].FFE_PRESET.raw =
- dpcd_lane_adjust.tx_ffe.PRESET_VALUE;
- } else if (dp_get_link_encoding_format(&link->cur_link_settings) ==
DP_8b_10b_ENCODING) {
- link_settings.lane_settings[lane].VOLTAGE_SWING =
+ link_training_settings.hw_lane_settings[lane].VOLTAGE_SWING =
(enum dc_voltage_swing)
(dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE);
- link_settings.lane_settings[lane].PRE_EMPHASIS =
+ link_training_settings.hw_lane_settings[lane].PRE_EMPHASIS =
(enum dc_pre_emphasis)
(dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE);
- link_settings.lane_settings[lane].POST_CURSOR2 =
+ link_training_settings.hw_lane_settings[lane].POST_CURSOR2 =
(enum dc_post_cursor2)
((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03);
}
-#else
- link_settings.lane_settings[lane].VOLTAGE_SWING =
- (enum dc_voltage_swing)
- (dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE);
- link_settings.lane_settings[lane].PRE_EMPHASIS =
- (enum dc_pre_emphasis)
- (dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE);
- link_settings.lane_settings[lane].POST_CURSOR2 =
- (enum dc_post_cursor2)
- ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if (dp_get_link_encoding_format(&link->cur_link_settings) ==
+ DP_128b_132b_ENCODING) {
+ link_training_settings.hw_lane_settings[lane].FFE_PRESET.raw =
+ dpcd_lane_adjust.tx_ffe.PRESET_VALUE;
+ }
#endif
}
- for (i = 0; i < 4; i++)
- link_training_settings.lane_settings[i] =
- link_settings.lane_settings[i];
- link_training_settings.link_settings = link_settings.link;
- link_training_settings.allow_invalid_msa_timing_param = false;
+ dp_hw_to_dpcd_lane_settings(&link_training_settings,
+ link_training_settings.hw_lane_settings,
+ link_training_settings.dpcd_lane_settings);
/*Usage: Measure DP physical lane signal
* by DP SI test equipment automatically.
* PHY test pattern request is generated by equipment via HPD interrupt.
@@ -4065,8 +3975,8 @@ void dc_link_dp_handle_link_loss(struct dc_link *link)
bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,
bool defer_handling, bool *has_left_work)
{
- union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } };
- union device_service_irq device_service_clear = { { 0 } };
+ union hpd_irq_data hpd_irq_dpcd_data = {0};
+ union device_service_irq device_service_clear = {0};
enum dc_status result;
bool status = false;
@@ -5419,6 +5329,14 @@ bool dc_link_dp_set_test_pattern(
return false;
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE)
+ core_link_write_dpcd(link,
+ DP_LINK_SQUARE_PATTERN,
+ p_custom_pattern,
+ 1);
+
+#endif
/* tell receiver that we are sending qualification
* pattern DP 1.2 or later - DP receiver's link quality
* pattern is set using DPCD LINK_QUAL_LANEx_SET
@@ -5939,7 +5857,7 @@ bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timin
uint8_t link_bw_set;
uint8_t link_rate_set;
uint32_t req_bw;
- union lane_count_set lane_count_set = { {0} };
+ union lane_count_set lane_count_set = {0};
ASSERT(link || crtc_timing); // invalid input
@@ -5993,6 +5911,25 @@ enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
+enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link)
+{
+ struct dc_link_settings link_settings = {0};
+
+ if (!dc_is_dp_signal(link->connector_signal))
+ return DP_UNKNOWN_ENCODING;
+
+ if (link->preferred_link_setting.lane_count !=
+ LANE_COUNT_UNKNOWN &&
+ link->preferred_link_setting.link_rate !=
+ LINK_RATE_UNKNOWN) {
+ link_settings = link->preferred_link_setting;
+ } else {
+ decide_mst_link_settings(link, &link_settings);
+ }
+
+ return dp_get_link_encoding_format(&link_settings);
+}
+
// TODO - DP2.0 Link: Fix get_lane_status to handle LTTPR offset (SST and MST)
static void get_lane_status(
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
index e9006d099393..b1c9f77d6bf4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
@@ -263,10 +263,10 @@ static enum link_training_result dpia_training_cr_non_transparent(struct dc_link
uint32_t retry_count = 0;
/* From DP spec, CR read interval is always 100us. */
uint32_t wait_time_microsec = TRAINING_AUX_RD_INTERVAL;
- struct link_training_settings req_settings;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
union lane_align_status_updated dpcd_lane_status_updated = { {0} };
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
uint8_t set_cfg_data;
enum dpia_set_config_ts ts;
@@ -345,11 +345,12 @@ static enum link_training_result dpia_training_cr_non_transparent(struct dc_link
dp_wait_for_training_aux_rd_interval(link, wait_time_microsec);
/* Read status and adjustment requests from DPCD. */
- status = dp_get_lane_status_and_drive_settings(link,
+ status = dp_get_lane_status_and_lane_adjust(
+ link,
lt_settings,
dpcd_lane_status,
&dpcd_lane_status_updated,
- &req_settings,
+ dpcd_lane_adjust,
hop);
if (status != DC_OK) {
result = LINK_TRAINING_ABORT;
@@ -371,16 +372,18 @@ static enum link_training_result dpia_training_cr_non_transparent(struct dc_link
* Note: settings are the same for all lanes,
* so comparing first lane is sufficient.
*/
- if (lt_settings->lane_settings[0].VOLTAGE_SWING ==
- req_settings.lane_settings[0].VOLTAGE_SWING &&
- lt_settings->lane_settings[0].PRE_EMPHASIS ==
- req_settings.lane_settings[0].PRE_EMPHASIS)
+ if ((lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
+ dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
+ && (lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET ==
+ dpcd_lane_adjust[0].bits.PRE_EMPHASIS_LANE))
retries_cr++;
else
retries_cr = 0;
/* Update VS/PE. */
- dp_update_drive_settings(lt_settings, req_settings);
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->lane_settings,
+ lt_settings->dpcd_lane_settings);
retry_count++;
}
@@ -416,10 +419,10 @@ static enum link_training_result dpia_training_cr_transparent(struct dc_link *li
uint32_t retries_cr = 0; /* Number of consecutive attempts with same VS or PE. */
uint32_t retry_count = 0;
uint32_t wait_time_microsec = lt_settings->cr_pattern_time;
- struct link_training_settings req_settings;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
union lane_align_status_updated dpcd_lane_status_updated = { {0} };
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
/* Cap of LINK_TRAINING_MAX_CR_RETRY attempts at clock recovery.
* Fix inherited from perform_clock_recovery_sequence() -
@@ -445,11 +448,12 @@ static enum link_training_result dpia_training_cr_transparent(struct dc_link *li
dp_wait_for_training_aux_rd_interval(link, wait_time_microsec);
/* Read status and adjustment requests from DPCD. */
- status = dp_get_lane_status_and_drive_settings(link,
+ status = dp_get_lane_status_and_lane_adjust(
+ link,
lt_settings,
dpcd_lane_status,
&dpcd_lane_status_updated,
- &req_settings,
+ dpcd_lane_adjust,
DPRX);
if (status != DC_OK) {
result = LINK_TRAINING_ABORT;
@@ -471,16 +475,17 @@ static enum link_training_result dpia_training_cr_transparent(struct dc_link *li
* Note: settings are the same for all lanes,
* so comparing first lane is sufficient.
*/
- if (lt_settings->lane_settings[0].VOLTAGE_SWING ==
- req_settings.lane_settings[0].VOLTAGE_SWING &&
- lt_settings->lane_settings[0].PRE_EMPHASIS ==
- req_settings.lane_settings[0].PRE_EMPHASIS)
+ if ((lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
+ dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
+ && (lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET ==
+ dpcd_lane_adjust[0].bits.PRE_EMPHASIS_LANE))
retries_cr++;
else
retries_cr = 0;
/* Update VS/PE. */
- dp_update_drive_settings(lt_settings, req_settings);
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
retry_count++;
}
@@ -566,10 +571,10 @@ static enum link_training_result dpia_training_eq_non_transparent(struct dc_link
enum dc_status status;
enum dc_dp_training_pattern tr_pattern;
uint32_t wait_time_microsec;
- struct link_training_settings req_settings;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
union lane_align_status_updated dpcd_lane_status_updated = { {0} };
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
uint8_t set_cfg_data;
enum dpia_set_config_ts ts;
@@ -639,11 +644,12 @@ static enum link_training_result dpia_training_eq_non_transparent(struct dc_link
dp_wait_for_training_aux_rd_interval(link, wait_time_microsec);
/* Read status and adjustment requests from DPCD. */
- status = dp_get_lane_status_and_drive_settings(link,
+ status = dp_get_lane_status_and_lane_adjust(
+ link,
lt_settings,
dpcd_lane_status,
&dpcd_lane_status_updated,
- &req_settings,
+ dpcd_lane_adjust,
hop);
if (status != DC_OK) {
result = LINK_TRAINING_ABORT;
@@ -664,7 +670,8 @@ static enum link_training_result dpia_training_eq_non_transparent(struct dc_link
}
/* Update VS/PE. */
- dp_update_drive_settings(lt_settings, req_settings);
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
}
/* Abort link training if equalization failed due to HPD unplug. */
@@ -701,10 +708,10 @@ static enum link_training_result dpia_training_eq_transparent(struct dc_link *li
enum dc_status status;
enum dc_dp_training_pattern tr_pattern = lt_settings->pattern_for_eq;
uint32_t wait_time_microsec;
- struct link_training_settings req_settings;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
union lane_align_status_updated dpcd_lane_status_updated = { {0} };
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
wait_time_microsec = dpia_get_eq_aux_rd_interval(link, lt_settings, DPRX);
@@ -720,11 +727,12 @@ static enum link_training_result dpia_training_eq_transparent(struct dc_link *li
dp_wait_for_training_aux_rd_interval(link, wait_time_microsec);
/* Read status and adjustment requests from DPCD. */
- status = dp_get_lane_status_and_drive_settings(link,
+ status = dp_get_lane_status_and_lane_adjust(
+ link,
lt_settings,
dpcd_lane_status,
&dpcd_lane_status_updated,
- &req_settings,
+ dpcd_lane_adjust,
DPRX);
if (status != DC_OK) {
result = LINK_TRAINING_ABORT;
@@ -745,7 +753,8 @@ static enum link_training_result dpia_training_eq_transparent(struct dc_link *li
}
/* Update VS/PE. */
- dp_update_drive_settings(lt_settings, req_settings);
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
}
/* Abort link training if equalization failed due to HPD unplug. */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
index 1cab4bf06abe..25e48a8cbb78 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
@@ -236,6 +236,23 @@ static struct link_encoder *get_link_enc_used_by_link(
return link_enc;
}
+/* Clear all link encoder assignments. */
+static void clear_enc_assignments(struct dc_state *state)
+{
+ int i;
+ enum engine_id eng_id;
+ struct dc_stream_state *stream;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid = false;
+ eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id;
+ stream = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream;
+ if (eng_id != ENGINE_ID_UNKNOWN)
+ state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_id - ENGINE_ID_DIGA] = eng_id;
+ if (stream)
+ stream->link_enc = NULL;
+ }
+}
void link_enc_cfg_init(
struct dc *dc,
@@ -250,6 +267,8 @@ void link_enc_cfg_init(
state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN;
}
+ clear_enc_assignments(state);
+
state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
}
@@ -265,6 +284,9 @@ void link_enc_cfg_link_encs_assign(
ASSERT(state->stream_count == stream_count);
+ if (stream_count == 0)
+ clear_enc_assignments(state);
+
/* Release DIG link encoder resources before running assignment algorithm. */
for (i = 0; i < stream_count; i++)
dc->res_pool->funcs->link_enc_unassign(state, streams[i]);
@@ -488,16 +510,19 @@ struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream(
return link_enc;
}
-bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id)
+bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id, struct dc_link *link)
{
bool is_avail = true;
int i;
- /* Add assigned encoders to list. */
+ /* An encoder is not available if it has already been assigned to a different endpoint. */
for (i = 0; i < MAX_PIPES; i++) {
struct link_enc_assignment assignment = get_assignment(dc, i);
+ struct display_endpoint_id ep_id = (struct display_endpoint_id) {
+ .link_id = link->link_id,
+ .ep_type = link->ep_type};
- if (assignment.valid && assignment.eng_id == eng_id) {
+ if (assignment.valid && assignment.eng_id == eng_id && !are_ep_ids_equal(&ep_id, &assignment.ep_id)) {
is_avail = false;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 561c10a92bb5..c32fdccd4d92 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1155,9 +1155,17 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width;
}
- if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE ||
- pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE)
- res = false;
+ if (!pipe_ctx->stream->ctx->dc->config.enable_windowed_mpo_odm) {
+ if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE ||
+ pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE)
+ res = false;
+ } else {
+ /* Clamp minimum viewport size */
+ if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE)
+ pipe_ctx->plane_res.scl_data.viewport.height = MIN_VIEWPORT_SIZE;
+ if (pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE)
+ pipe_ctx->plane_res.scl_data.viewport.width = MIN_VIEWPORT_SIZE;
+ }
DC_LOG_SCALER("%s pipe %d:\nViewport: height:%d width:%d x:%d y:%d Recout: height:%d width:%d x:%d y:%d HACTIVE:%d VACTIVE:%d\n"
"src_rect: height:%d width:%d x:%d y:%d dst_rect: height:%d width:%d x:%d y:%d clip_rect: height:%d width:%d x:%d y:%d\n",
@@ -3009,6 +3017,11 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla
{
enum dc_status res = DC_OK;
+ /* check if surface has invalid dimensions */
+ if (plane_state->src_rect.width == 0 || plane_state->src_rect.height == 0 ||
+ plane_state->dst_rect.width == 0 || plane_state->dst_rect.height == 0)
+ return DC_FAIL_SURFACE_VALIDATE;
+
/* TODO For now validates pixel format only */
if (dc->res_pool->funcs->validate_plane)
return dc->res_pool->funcs->validate_plane(plane_state, &dc->caps);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index f0f54f4d3d9b..57cf4cb82370 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -202,6 +202,10 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
new_stream->stream_id = new_stream->ctx->dc_stream_id_count;
new_stream->ctx->dc_stream_id_count++;
+ /* If using dynamic encoder assignment, wait until the stream is committed to assign an encoder. */
+ if (new_stream->ctx->dc->res_pool->funcs->link_encs_assign)
+ new_stream->link_enc = NULL;
+
kref_init(&new_stream->refcount);
return new_stream;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index dd995905b0cb..3aac3f4a2852 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.156"
+#define DC_VER "3.2.160"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -211,12 +211,12 @@ struct dc_dcc_setting {
unsigned int max_uncompressed_blk_size;
bool independent_64b_blks;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- //These bitfields to be used starting with DCN 3.0
+ //These bitfields to be used starting with DCN
struct {
- uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN 3.0 (the worst compression case)
- uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN 3.0
- uint32_t dcc_256_128_128 : 1; //available starting with DCN 3.0
- uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN 3.0 (the best compression case)
+ uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN (the worst compression case)
+ uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN
+ uint32_t dcc_256_128_128 : 1; //available starting with DCN
+ uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN (the best compression case)
} dcc_controls;
#endif
};
@@ -323,6 +323,7 @@ struct dc_config {
bool multi_mon_pp_mclk_switch;
bool disable_dmcu;
bool enable_4to1MPC;
+ bool enable_windowed_mpo_odm;
bool allow_edp_hotplug_detection;
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool clamp_min_dcfclk;
@@ -342,6 +343,12 @@ enum visual_confirm {
VISUAL_CONFIRM_SWIZZLE = 9,
};
+enum dc_psr_power_opts {
+ psr_power_opt_invalid = 0x0,
+ psr_power_opt_smu_opt_static_screen = 0x1,
+ psr_power_opt_z10_static_screen = 0x10,
+};
+
enum dcc_option {
DCC_ENABLE = 0,
DCC_DISABLE = 1,
@@ -664,9 +671,11 @@ struct dc_debug_options {
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* TODO - remove once tested */
bool legacy_dp2_lt;
+ bool set_mst_en_for_sst;
#endif
union mem_low_power_enable_options enable_mem_low_power;
union root_clock_optimization_options root_clock_optimization;
+ bool hpo_optimization;
bool force_vblank_alignment;
/* Enable dmub aux for legacy ddc */
@@ -724,6 +733,9 @@ struct dc {
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool idle_optimizations_allowed;
#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ bool enable_c20_dtm_b0;
+#endif
/* Require to maintain clocks and bandwidth for UEFI enabled HW */
@@ -930,6 +942,7 @@ union surface_update_flags {
uint32_t bandwidth_change:1;
uint32_t clock_change:1;
uint32_t stereo_format_change:1;
+ uint32_t lut_3d:1;
uint32_t full_update:1;
} bits;
@@ -1416,6 +1429,12 @@ bool dc_process_dmub_set_config_async(struct dc *dc,
uint32_t link_index,
struct set_config_cmd_payload *payload,
struct dmub_notification *notify);
+
+enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
+ uint32_t link_index,
+ uint8_t mst_alloc_slots,
+ uint8_t *mst_slots_in_use);
+
/*******************************************************************************
* DSC Interfaces
******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index a23937e1dc5c..e68e9a86a4d9 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -174,11 +174,6 @@ struct dc_lane_settings {
#endif
};
-struct dc_link_training_settings {
- struct dc_link_settings link;
- struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX];
-};
-
struct dc_link_training_overrides {
enum dc_voltage_swing *voltage_swing;
enum dc_pre_emphasis *pre_emphasis;
@@ -903,6 +898,9 @@ struct dpcd_usb4_dp_tunneling_info {
#ifndef DP_DFP_CAPABILITY_EXTENSION_SUPPORT
#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0A3
#endif
+#ifndef DP_LINK_SQUARE_PATTERN
+#define DP_LINK_SQUARE_PATTERN 0x10F
+#endif
#ifndef DP_DSC_CONFIGURATION
#define DP_DSC_CONFIGURATION 0x161
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 69b008bafbbc..180ecd860296 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -85,6 +85,7 @@ struct psr_settings {
*/
bool psr_frame_capture_indication_req;
unsigned int psr_sdp_transmit_line_num_deadline;
+ unsigned int psr_power_opt;
};
/*
@@ -123,6 +124,10 @@ struct dc_link {
struct dc_link_settings cur_link_settings;
struct dc_lane_settings cur_lane_setting[LANE_COUNT_DP_MAX];
struct dc_link_settings preferred_link_setting;
+ /* preferred_training_settings are override values that
+ * come from DM. DM is responsible for the memory
+ * management of the override pointers.
+ */
struct dc_link_training_overrides preferred_training_settings;
struct dp_audio_test_data audio_test_data;
@@ -177,11 +182,15 @@ struct dc_link {
struct psr_settings psr_settings;
+ /* Drive settings read from integrated info table */
+ struct dc_lane_settings bios_forced_drive_settings;
+
/* MST record stream using this link */
struct link_flags {
bool dp_keep_receiver_powered;
bool dp_skip_DID2;
bool dp_skip_reset_segment;
+ bool dp_mot_reset_segment;
} wa_flags;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
@@ -267,8 +276,8 @@ int dc_link_get_backlight_level(const struct dc_link *dc_link);
int dc_link_get_target_backlight_pwm(const struct dc_link *link);
-bool dc_link_set_psr_allow_active(struct dc_link *dc_link, bool enable,
- bool wait, bool force_static);
+bool dc_link_set_psr_allow_active(struct dc_link *dc_link, const bool *enable,
+ bool wait, bool force_static, const unsigned int *power_opts);
bool dc_link_get_psr_state(const struct dc_link *dc_link, enum dc_psr_state *state);
@@ -277,7 +286,6 @@ bool dc_link_setup_psr(struct dc_link *dc_link,
struct psr_context *psr_context);
void dc_link_get_psr_residency(const struct dc_link *link, uint32_t *residency);
-void blank_all_dp_displays(struct dc *dc, bool hw_init);
/* Request DC to detect if there is a Panel connected.
* boot - If this call is during initial boot.
@@ -296,6 +304,10 @@ enum dc_detect_reason {
bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
bool dc_link_get_hpd_state(struct dc_link *dc_link);
enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
+enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
+#endif
/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
* Return:
@@ -425,4 +437,7 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
bool dc_link_is_fec_supported(const struct dc_link *link);
bool dc_link_should_enable_fec(const struct dc_link *link);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link);
+#endif
#endif /* DC_LINK_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index b8ebc1f09538..e37c4a10bfd5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -115,6 +115,13 @@ struct periodic_interrupt_config {
int lines_offset;
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+struct dc_mst_stream_bw_update {
+ bool is_increase; // true if bandwidth is increased, false if reduced
+ uint32_t mst_stream_bw; // new mst bandwidth in kbps
+};
+#endif
+
union stream_update_flags {
struct {
uint32_t scaling:1;
@@ -125,6 +132,9 @@ union stream_update_flags {
uint32_t gamut_remap:1;
uint32_t wb_update:1;
uint32_t dsc_changed : 1;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ uint32_t mst_bw : 1;
+#endif
} bits;
uint32_t raw;
@@ -278,6 +288,9 @@ struct dc_stream_update {
struct dc_writeback_update *wb_update;
struct dc_dsc_config *dsc_config;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct dc_mst_stream_bw_update *mst_bw_update;
+#endif
struct dc_transfer_func *func_shaper;
struct dc_3dlut *lut3d_func;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 15c353c389d8..388457ffc0a8 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -653,6 +653,7 @@ enum dc_psr_state {
PSR_STATE1a,
PSR_STATE2,
PSR_STATE2a,
+ PSR_STATE2b,
PSR_STATE3,
PSR_STATE3Init,
PSR_STATE4,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 7866cf2a668f..27218ede150a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -514,13 +514,15 @@ void dce_aud_az_configure(
union audio_sample_rates sample_rates =
audio_mode->sample_rates;
uint8_t byte2 = audio_mode->max_bit_rate;
+ uint8_t channel_count = audio_mode->channel_count;
/* adjust specific properties */
switch (audio_format_code) {
case AUDIO_FORMAT_CODE_LINEARPCM: {
+
check_audio_bandwidth(
crtc_info,
- audio_mode->channel_count,
+ channel_count,
signal,
&sample_rates);
@@ -548,7 +550,7 @@ void dce_aud_az_configure(
/* fill audio format data */
set_reg_field_value(value,
- audio_mode->channel_count - 1,
+ channel_count - 1,
AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
MAX_CHANNELS);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 95cb4d7cc76a..6d42a9cc9916 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -534,17 +534,26 @@ struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine
static enum i2caux_transaction_action i2caux_action_from_payload(struct aux_payload *payload)
{
if (payload->i2c_over_aux) {
+ if (payload->write_status_update) {
+ if (payload->mot)
+ return I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT;
+ else
+ return I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
+ }
if (payload->write) {
if (payload->mot)
return I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT;
- return I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+ else
+ return I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
}
if (payload->mot)
return I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT;
+
return I2CAUX_TRANSACTION_ACTION_I2C_READ;
}
if (payload->write)
return I2CAUX_TRANSACTION_ACTION_DP_WRITE;
+
return I2CAUX_TRANSACTION_ACTION_DP_READ;
}
@@ -698,7 +707,8 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
aux_defer_retries = 0,
aux_i2c_defer_retries = 0,
aux_timeout_retries = 0,
- aux_invalid_reply_retries = 0;
+ aux_invalid_reply_retries = 0,
+ aux_ack_m_retries = 0;
if (ddc_pin) {
aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
@@ -758,9 +768,27 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
aux_defer_retries,
AUX_MAX_RETRIES);
goto fail;
- } else {
+ } else
udelay(300);
+ } else if (payload->write && ret > 0) {
+ /* sink requested more time to complete the write via AUX_ACKM */
+ if (++aux_ack_m_retries >= AUX_MAX_RETRIES) {
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
+ LOG_FLAG_Error_I2cAux,
+ "dce_aux_transfer_with_retries: FAILURE: aux_ack_m_retries=%d >= AUX_MAX_RETRIES=%d",
+ aux_ack_m_retries,
+ AUX_MAX_RETRIES);
+ goto fail;
}
+
+ /* retry reading the write status until complete
+ * NOTE: payload is modified here
+ */
+ payload->write = false;
+ payload->write_status_update = true;
+ payload->length = 0;
+ udelay(300);
+
} else
return true;
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 296b2f80a1ec..a3fee929cd12 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -671,6 +671,7 @@ struct dce_hwseq_registers {
uint32_t MC_VM_FB_LOCATION_BASE;
uint32_t MC_VM_FB_LOCATION_TOP;
uint32_t MC_VM_FB_OFFSET;
+ uint32_t HPO_TOP_HW_CONTROL;
};
/* set field name */
#define HWS_SF(blk_name, reg_name, field_name, post_fix)\
@@ -1151,7 +1152,9 @@ struct dce_hwseq_registers {
type DOMAIN_POWER_GATE;\
type DOMAIN_PGFSM_PWR_STATUS;\
type HPO_HDMISTREAMCLK_G_GATE_DIS;\
- type DISABLE_HOSTVM_FORCE_ALLOW_PSTATE;
+ type DISABLE_HOSTVM_FORCE_ALLOW_PSTATE;\
+ type I2C_LIGHT_SLEEP_FORCE;\
+ type HPO_IO_EN;
struct dce_hwseq_shift {
HWSEQ_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index aa8403bc4c83..90eb8eedacf2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -50,6 +50,8 @@ static enum dc_psr_state convert_psr_state(uint32_t raw_state)
state = PSR_STATE2;
else if (raw_state == 0x21)
state = PSR_STATE2a;
+ else if (raw_state == 0x22)
+ state = PSR_STATE2b;
else if (raw_state == 0x30)
state = PSR_STATE3;
else if (raw_state == 0x31)
@@ -225,6 +227,25 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_
dc_dmub_srv_wait_idle(dc->dmub_srv);
}
+/**
+ * dmub_psr_set_power_opt() - Set PSR power optimization flags.
+ * @dmub: pointer to the DMUB PSR interface
+ * @power_opt: power optimization flags to apply
+ */
+static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dmub->ctx;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.psr_set_power_opt.header.type = DMUB_CMD__PSR;
+ cmd.psr_set_power_opt.header.sub_type = DMUB_CMD__SET_PSR_POWER_OPT;
+ cmd.psr_set_power_opt.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_power_opt_data);
+ cmd.psr_set_power_opt.psr_set_power_opt_data.power_opt = power_opt;
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+}
+
/*
* Setup PSR by programming phy registers and sending psr hw context values to firmware.
*/
@@ -356,6 +377,7 @@ static const struct dmub_psr_funcs psr_funcs = {
.psr_set_level = dmub_psr_set_level,
.psr_force_static = dmub_psr_force_static,
.psr_get_residency = dmub_psr_get_residency,
+ .psr_set_power_opt = dmub_psr_set_power_opt,
};
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
index 9675c269e649..5dbd479660f1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
@@ -46,6 +46,7 @@ struct dmub_psr_funcs {
void (*psr_force_static)(struct dmub_psr *dmub, uint8_t panel_inst);
void (*psr_get_residency)(struct dmub_psr *dmub, uint32_t *residency,
uint8_t panel_inst);
+ void (*psr_set_power_opt)(struct dmub_psr *dmub, unsigned int power_opt);
};
struct dmub_psr *dmub_psr_create(struct dc_context *ctx);
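With the new hook declared here, callers reach it through the funcs table. A hypothetical call site, assuming a struct dmub_psr pointer that exposes its funcs table and a power_opt value already in hand (a sketch, not code from this patch):

/* hypothetical helper around the new funcs-table entry */
static void set_psr_power_opt_if_supported(struct dmub_psr *psr, unsigned int power_opt)
{
	if (psr->funcs->psr_set_power_opt)
		psr->funcs->psr_set_power_opt(psr, power_opt);
}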
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 8108f9ae2638..24e47df526f6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1244,6 +1244,12 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
#endif
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dc->hwseq->funcs.setup_hpo_hw_control && is_dp_128b_132b_signal(pipe_ctx))
+ dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, false);
+#endif
+
}
void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
@@ -1649,13 +1655,31 @@ static enum dc_status apply_single_controller_ctx_to_hw(
static void power_down_encoders(struct dc *dc)
{
- int i;
-
- blank_all_dp_displays(dc, false);
+ int i, j;
for (i = 0; i < dc->link_count; i++) {
enum signal_type signal = dc->links[i]->connector_signal;
+ if ((signal == SIGNAL_TYPE_EDP) ||
+ (signal == SIGNAL_TYPE_DISPLAY_PORT)) {
+ if (dc->links[i]->link_enc->funcs->get_dig_frontend &&
+ dc->links[i]->link_enc->funcs->is_dig_enabled(dc->links[i]->link_enc)) {
+ unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
+ dc->links[i]->link_enc);
+
+ for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+ if (fe == dc->res_pool->stream_enc[j]->id) {
+ dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
+ dc->res_pool->stream_enc[j]);
+ break;
+ }
+ }
+ }
+
+ if (!dc->links[i]->wa_flags.dp_keep_receiver_powered)
+ dp_receiver_power_ctrl(dc->links[i], false);
+ }
+
if (signal != SIGNAL_TYPE_EDP)
signal = SIGNAL_TYPE_NONE;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index cb9767ddf93d..44293d66b46b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -205,9 +205,17 @@ static void dpp1_power_on_dscl(
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
if (dpp->tf_regs->DSCL_MEM_PWR_CTRL) {
- REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, power_on ? 0 : 3);
- if (power_on)
+ if (power_on) {
+ REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 0);
REG_WAIT(DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, 0, 1, 5);
+ } else {
+ if (dpp->base.ctx->dc->debug.enable_mem_low_power.bits.dscl) {
+ dpp->base.ctx->dc->optimized_required = true;
+ dpp->base.deferred_reg_writes.bits.disable_dscl = true;
+ } else {
+ REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 3);
+ }
+ }
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index bc3ec05bf34b..0b788d794fb3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -231,7 +231,7 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
if (!s->blank_en)
DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
- "% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
+ " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
@@ -1366,7 +1366,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
void dcn10_init_hw(struct dc *dc)
{
- int i;
+ int i, j;
struct abm *abm = dc->res_pool->abm;
struct dmcu *dmcu = dc->res_pool->dmcu;
struct dce_hwseq *hws = dc->hwseq;
@@ -1378,6 +1378,12 @@ void dcn10_init_hw(struct dc *dc)
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
+ /* Align bw context with hw config when system resume. */
+ if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
+ dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
+ dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
+ }
+
// Initialize the dccg
if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
@@ -1462,8 +1468,43 @@ void dcn10_init_hw(struct dc *dc)
dmub_enable_outbox_notification(dc);
/* we want to turn off all dp displays before doing detection */
- if (dc->config.power_down_display_on_boot)
- blank_all_dp_displays(dc, true);
+ if (dc->config.power_down_display_on_boot) {
+ uint8_t dpcd_power_state = '\0';
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
+ continue;
+
+ /* DP 2.0 requires that LTTPR Caps be read first */
+ dp_retrieve_lttpr_cap(dc->links[i]);
+
+ /*
+ * If any of the displays are lit up turn them off.
+ * The reason is that some MST hubs cannot be turned off
+ * completely until we tell them to do so.
+ * If not turned off, then displays connected to MST hub
+ * won't light up.
+ */
+ status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+ if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
+ /* blank dp stream before power off receiver*/
+ if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
+ unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc);
+
+ for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+ if (fe == dc->res_pool->stream_enc[j]->id) {
+ dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
+ dc->res_pool->stream_enc[j]);
+ break;
+ }
+ }
+ }
+ dp_receiver_power_ctrl(dc->links[i], false);
+ }
+ }
+ }
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
@@ -2304,8 +2345,8 @@ static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
- struct vm_system_aperture_param apt = { {{ 0 } } };
- struct vm_context0_param vm0 = { { { 0 } } };
+ struct vm_system_aperture_param apt = {0};
+ struct vm_context0_param vm0 = {0};
mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
@@ -2478,7 +2519,7 @@ void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx,
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
- struct mpcc_blnd_cfg blnd_cfg = {{0}};
+ struct mpcc_blnd_cfg blnd_cfg = {0};
bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
int mpcc_id;
struct mpcc *new_mpcc;
@@ -3635,7 +3676,7 @@ void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings)
{
- struct encoder_unblank_param params = { { 0 } };
+ struct encoder_unblank_param params = {0};
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
struct dce_hwseq *hws = link->dc->hwseq;
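The power_down_display_on_boot path added above (and repeated with small variations in the dcn30 and dcn31 sequencers later in this patch) always performs the same sequence: read DP_SET_POWER, and if the receiver reports D0, blank the stream encoder bound to the link's DIG frontend before powering the receiver down. A compact sketch of that sequence, not part of the patch, with hypothetical stand-in types:

#include <stdbool.h>
#include <stddef.h>

struct stream_enc_sketch {
	unsigned int id;				/* engine id of this stream encoder */
	void (*dp_blank)(struct stream_enc_sketch *se);
};

struct dp_link_sketch {
	bool powered;					/* DP_SET_POWER read back as D0 */
	unsigned int dig_frontend;			/* engine id currently feeding this link */
	void (*receiver_power)(struct dp_link_sketch *l, bool on);
};

static void blank_and_power_down(struct dp_link_sketch *link,
				 struct stream_enc_sketch *encs, size_t enc_count)
{
	size_t j;

	if (!link->powered)
		return;

	/* blank the stream encoder bound to this link's DIG frontend first */
	for (j = 0; j < enc_count; j++) {
		if (encs[j].id == link->dig_frontend) {
			encs[j].dp_blank(&encs[j]);
			break;
		}
	}

	/* then power down the DP receiver */
	link->receiver_power(link, false);
}

Blanking the frontend first ensures the stream is stopped before the receiver loses power, rather than cutting power to a sink that is still being driven.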
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
index ede65100a050..f98aba308028 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
@@ -169,7 +169,29 @@
type DTBCLK_DTO_DIV[MAX_PIPES];\
type DCCG_AUDIO_DTO_SEL;\
type DCCG_AUDIO_DTO0_SOURCE_SEL;\
- type DENTIST_DISPCLK_CHG_MODE;
+ type DENTIST_DISPCLK_CHG_MODE;\
+ type DSCCLK0_DTO_PHASE;\
+ type DSCCLK0_DTO_MODULO;\
+ type DSCCLK1_DTO_PHASE;\
+ type DSCCLK1_DTO_MODULO;\
+ type DSCCLK2_DTO_PHASE;\
+ type DSCCLK2_DTO_MODULO;\
+ type DSCCLK0_DTO_ENABLE;\
+ type DSCCLK1_DTO_ENABLE;\
+ type DSCCLK2_DTO_ENABLE;\
+ type SYMCLK32_ROOT_SE0_GATE_DISABLE;\
+ type SYMCLK32_ROOT_SE1_GATE_DISABLE;\
+ type SYMCLK32_ROOT_SE2_GATE_DISABLE;\
+ type SYMCLK32_ROOT_SE3_GATE_DISABLE;\
+ type SYMCLK32_ROOT_LE0_GATE_DISABLE;\
+ type SYMCLK32_ROOT_LE1_GATE_DISABLE;\
+ type DPSTREAMCLK_ROOT_GATE_DISABLE;\
+ type DPSTREAMCLK_GATE_DISABLE;\
+ type HDMISTREAMCLK0_DTO_PHASE;\
+ type HDMISTREAMCLK0_DTO_MODULO;\
+ type HDMICHARCLK0_GATE_DISABLE;\
+ type HDMICHARCLK0_ROOT_GATE_DISABLE;
+
struct dccg_shift {
DCCG_REG_FIELD_LIST(uint8_t)
@@ -205,6 +227,16 @@ struct dccg_registers {
uint32_t SYMCLK32_SE_CNTL;
uint32_t SYMCLK32_LE_CNTL;
uint32_t DENTIST_DISPCLK_CNTL;
+ uint32_t DSCCLK_DTO_CTRL;
+ uint32_t DSCCLK0_DTO_PARAM;
+ uint32_t DSCCLK1_DTO_PARAM;
+ uint32_t DSCCLK2_DTO_PARAM;
+ uint32_t DPSTREAMCLK_ROOT_GATE_DISABLE;
+ uint32_t DPSTREAMCLK_GATE_DISABLE;
+ uint32_t DCCG_GATE_DISABLE_CNTL3;
+ uint32_t HDMISTREAMCLK0_DTO_PARAM;
+ uint32_t DCCG_GATE_DISABLE_CNTL4;
+
};
struct dcn_dccg {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index fc83744149d9..4f88376a118f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -2123,7 +2123,7 @@ void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings)
{
- struct encoder_unblank_param params = { { 0 } };
+ struct encoder_unblank_param params = {0};
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
struct dce_hwseq *hws = link->dc->hwseq;
@@ -2298,7 +2298,7 @@ void dcn20_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx,
void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
- struct mpcc_blnd_cfg blnd_cfg = { {0} };
+ struct mpcc_blnd_cfg blnd_cfg = {0};
bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;
int mpcc_id;
struct mpcc *new_mpcc;
@@ -2397,6 +2397,9 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
* BY this, it is logic clean to separate stream and link
*/
if (is_dp_128b_132b_signal(pipe_ctx)) {
+ if (pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control)
+ pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control(
+ pipe_ctx->stream->ctx->dc->hwseq, true);
setup_dp_hpo_stream(pipe_ctx, true);
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->enable_stream(
pipe_ctx->stream_res.hpo_dp_stream_enc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 756f5d411d9a..3883f918b3bb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -3660,9 +3660,6 @@ static enum dml_project get_dml_project_version(uint32_t hw_internal_rev)
return DML_PROJECT_NAVI10v2;
}
-#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16)))
-#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
-
static bool init_soc_bounding_box(struct dc *dc,
struct dcn20_resource_pool *pool)
{
@@ -3698,16 +3695,22 @@ static bool init_soc_bounding_box(struct dc *dc,
clock_limits_available = (status == PP_SMU_RESULT_OK);
}
- if (clock_limits_available && uclk_states_available && num_states)
+ if (clock_limits_available && uclk_states_available && num_states) {
+ DC_FP_START();
dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states);
- else if (clock_limits_available)
+ DC_FP_END();
+ } else if (clock_limits_available) {
+ DC_FP_START();
dcn20_cap_soc_clocks(loaded_bb, max_clocks);
+ DC_FP_END();
+ }
}
loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
loaded_ip->max_num_dpp = pool->base.pipe_count;
+ DC_FP_START();
dcn20_patch_bounding_box(dc, loaded_bb);
-
+ DC_FP_END();
return true;
}
@@ -3727,8 +3730,6 @@ static bool dcn20_resource_construct(
enum dml_project dml_project_version =
get_dml_project_version(ctx->asic_id.hw_internal_rev);
- DC_FP_START();
-
ctx->dc_bios->regs = &bios_regs;
pool->base.funcs = &dcn20_res_pool_funcs;
@@ -4077,12 +4078,10 @@ static bool dcn20_resource_construct(
pool->base.oem_device = NULL;
}
- DC_FP_END();
return true;
create_fail:
- DC_FP_END();
dcn20_resource_destruct(pool);
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index ef5f6da5248a..c1d967ed6551 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -494,6 +494,20 @@ void dpp3_deferred_update(
int bypass_state;
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ if (dpp_base->deferred_reg_writes.bits.disable_dscl) {
+ REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 3);
+ dpp_base->deferred_reg_writes.bits.disable_dscl = false;
+ }
+
+ if (dpp_base->deferred_reg_writes.bits.disable_gamcor) {
+ REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &bypass_state);
+ if (bypass_state == 0) { // only program if bypass was latched
+ REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 3);
+ } else
+ ASSERT(0); // LUT select was updated again before vupdate
+ dpp_base->deferred_reg_writes.bits.disable_gamcor = false;
+ }
+
if (dpp_base->deferred_reg_writes.bits.disable_blnd_lut) {
REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE_CURRENT, &bypass_state);
if (bypass_state == 0) { // only program if bypass was latched
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
index 72c5687adc68..387eec616162 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
@@ -136,9 +136,13 @@ static void dpp3_power_on_gamcor_lut(
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) {
- REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, power_on ? 0 : 3);
- if (power_on)
+ if (power_on) {
+ REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 0);
REG_WAIT(CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, 0, 1, 5);
+ } else {
+ dpp_base->ctx->dc->optimized_required = true;
+ dpp_base->deferred_reg_writes.bits.disable_gamcor = true;
+ }
} else
REG_SET(CM_MEM_PWR_CTRL, 0,
GAMCOR_MEM_PWR_DIS, power_on == true ? 0:1);
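With the change above, the power-down side in dpp1_power_on_dscl() and dpp3_power_on_gamcor_lut() no longer forces the LUT memories off immediately when the low-power debug bit is set; it records a deferred flag and marks optimized_required, and dpp3_deferred_update() applies the register write later at a safe point. A minimal sketch of that producer/consumer shape, not part of the patch, with hypothetical register and state stand-ins:

#include <stdbool.h>

/* hypothetical stand-in for REG_UPDATE(..., *_MEM_PWR_FORCE, force) */
void reg_force_mem_pwr(int force);

struct dpp_sketch {
	struct {
		bool disable_dscl;	/* deferred: force DSCL LUT memory off */
	} deferred;
	bool *optimized_required;	/* tells DC a later optimize pass is needed */
};

/* producer: called when the block is powered down with low power enabled */
void dscl_power_down_deferred(struct dpp_sketch *dpp)
{
	*dpp->optimized_required = true;	/* schedule the optimize/vupdate pass */
	dpp->deferred.disable_dscl = true;	/* remember the register write to do */
}

/* consumer: called at a safe point, e.g. in the deferred-update hook */
void dpp_apply_deferred(struct dpp_sketch *dpp)
{
	if (dpp->deferred.disable_dscl) {
		reg_force_mem_pwr(3);		/* 3 = force shutdown, as in the hunks above */
		dpp->deferred.disable_dscl = false;
	}
}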
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index 01a90badd173..df2717116604 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -437,7 +437,7 @@ void dcn30_init_hw(struct dc *dc)
struct dce_hwseq *hws = dc->hwseq;
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
- int i;
+ int i, j;
int edp_num;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
@@ -534,8 +534,41 @@ void dcn30_init_hw(struct dc *dc)
hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
/* we want to turn off all dp displays before doing detection */
- if (dc->config.power_down_display_on_boot)
- blank_all_dp_displays(dc, true);
+ if (dc->config.power_down_display_on_boot) {
+ uint8_t dpcd_power_state = '\0';
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
+ continue;
+ /* DP 2.0 states that LTTPR regs must be read first */
+ dp_retrieve_lttpr_cap(dc->links[i]);
+
+ /* if any of the displays are lit up turn them off */
+ status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+ if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
+ /* blank dp stream before power off receiver*/
+ if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
+ unsigned int fe;
+
+ fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
+ dc->links[i]->link_enc);
+ if (fe == ENGINE_ID_UNKNOWN)
+ continue;
+
+ for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+ if (fe == dc->res_pool->stream_enc[j]->id) {
+ dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
+ dc->res_pool->stream_enc[j]);
+ break;
+ }
+ }
+ }
+ dp_receiver_power_ctrl(dc->links[i], false);
+ }
+ }
+ }
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
@@ -969,7 +1002,8 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
/* turning off DPG */
pipe_ctx->plane_res.hubp->funcs->set_blank(pipe_ctx->plane_res.hubp, false);
for (mpcc_pipe = pipe_ctx->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
- mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, false);
+ if (mpcc_pipe->plane_res.hubp)
+ mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, false);
stream_res->opp->funcs->opp_set_disp_pattern_generator(stream_res->opp, test_pattern, color_space,
color_depth, solid_color, width, height, offset);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
index a82319f4d081..95149734378b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
@@ -1381,13 +1381,11 @@ int mpcc3_release_rmu(struct mpc *mpc, int mpcc_id)
}
-static void mpc3_mpc_init(struct mpc *mpc)
+static void mpc3_set_mpc_mem_lp_mode(struct mpc *mpc)
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
int mpcc_id;
- mpc1_mpc_init(mpc);
-
if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
if (mpc30->mpc_mask->MPC_RMU0_MEM_LOW_PWR_MODE && mpc30->mpc_mask->MPC_RMU1_MEM_LOW_PWR_MODE) {
REG_UPDATE(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_LOW_PWR_MODE, 3);
@@ -1405,7 +1403,7 @@ const struct mpc_funcs dcn30_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
- .mpc_init = mpc3_mpc_init,
+ .mpc_init = mpc1_mpc_init,
.mpc_init_single_inst = mpc1_mpc_init_single_inst,
.update_blending = mpc2_update_blending,
.cursor_lock = mpc1_cursor_lock,
@@ -1432,6 +1430,7 @@ const struct mpc_funcs dcn30_mpc_funcs = {
.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
.set_bg_color = mpc1_set_bg_color,
+ .set_mpc_mem_lp_mode = mpc3_set_mpc_mem_lp_mode,
};
void dcn30_mpc_construct(struct dcn30_mpc *mpc30,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 3a8a3214f770..79a66e0c4303 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -1707,9 +1707,6 @@ bool dcn30_release_post_bldn_3dlut(
return ret;
}
-#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16)))
-#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
-
static bool is_soc_bounding_box_valid(struct dc *dc)
{
uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;
@@ -1929,23 +1926,25 @@ noinline bool dcn30_internal_validate_bw(
if (vlevel == context->bw_ctx.dml.soc.num_states)
goto validate_fail;
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
+ if (!dc->config.enable_windowed_mpo_odm) {
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
- if (!pipe->stream)
- continue;
+ if (!pipe->stream)
+ continue;
- /* We only support full screen mpo with ODM */
- if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
- && pipe->plane_state && mpo_pipe
- && memcmp(&mpo_pipe->plane_res.scl_data.recout,
- &pipe->plane_res.scl_data.recout,
- sizeof(struct rect)) != 0) {
- ASSERT(mpo_pipe->plane_state != pipe->plane_state);
- goto validate_fail;
+ /* We only support full screen mpo with ODM */
+ if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
+ && pipe->plane_state && mpo_pipe
+ && memcmp(&mpo_pipe->plane_res.scl_data.recout,
+ &pipe->plane_res.scl_data.recout,
+ sizeof(struct rect)) != 0) {
+ ASSERT(mpo_pipe->plane_state != pipe->plane_state);
+ goto validate_fail;
+ }
+ pipe_idx++;
}
- pipe_idx++;
}
/* merge pipes if necessary */
@@ -2129,10 +2128,10 @@ static noinline void dcn30_calculate_wm_and_dlg_fp(
int pipe_cnt,
int vlevel)
{
+ int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
int i, pipe_idx;
- double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
- bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
- dm_dram_clock_change_unsupported;
+ double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][maxMpcComb];
+ bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported;
if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
@@ -2208,6 +2207,7 @@ static noinline void dcn30_calculate_wm_and_dlg_fp(
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
}
+
context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
@@ -2323,7 +2323,9 @@ bool dcn30_validate_bandwidth(struct dc *dc,
goto validate_out;
}
+ DC_FP_START();
dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
+ DC_FP_END();
BW_VAL_TRACE_END_WATERMARKS();
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
index 09264716d1dc..7aa628c21973 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
@@ -13,32 +13,6 @@
DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \
dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o
-ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -msse
-endif
-
-ifdef CONFIG_PPC64
-CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mhard-float -maltivec
-endif
-
-ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
-IS_OLD_GCC = 1
-endif
-CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o += -mhard-float
-endif
-
-ifdef CONFIG_X86
-ifdef IS_OLD_GCC
-# Stack alignment mismatch, proceed with caution.
-# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
-# (8B stack alignment).
-CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o += -mpreferred-stack-boundary=4
-else
-CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o += -msse2
-endif
-endif
-
AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301))
AMD_DISPLAY_FILES += $(AMD_DAL_DCN301)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 5350c93d7772..fbaa03f26d8b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -82,6 +82,7 @@
#include "dce/dce_i2c.h"
#include "dml/dcn30/display_mode_vba_30.h"
+#include "dml/dcn301/dcn301_fpu.h"
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
#include "amdgpu_socbb.h"
@@ -91,184 +92,6 @@
#define DC_LOGGER_INIT(logger)
-struct _vcs_dpi_ip_params_st dcn3_01_ip = {
- .odm_capable = 1,
- .gpuvm_enable = 1,
- .hostvm_enable = 1,
- .gpuvm_max_page_table_levels = 1,
- .hostvm_max_page_table_levels = 2,
- .hostvm_cached_page_table_levels = 0,
- .pte_group_size_bytes = 2048,
- .num_dsc = 3,
- .rob_buffer_size_kbytes = 184,
- .det_buffer_size_kbytes = 184,
- .dpte_buffer_size_in_pte_reqs_luma = 64,
- .dpte_buffer_size_in_pte_reqs_chroma = 32,
- .pde_proc_buffer_size_64k_reqs = 48,
- .dpp_output_buffer_pixels = 2560,
- .opp_output_buffer_lines = 1,
- .pixel_chunk_size_kbytes = 8,
- .meta_chunk_size_kbytes = 2,
- .writeback_chunk_size_kbytes = 8,
- .line_buffer_size_bits = 789504,
- .is_line_buffer_bpp_fixed = 0, // ?
- .line_buffer_fixed_bpp = 48, // ?
- .dcc_supported = true,
- .writeback_interface_buffer_size_kbytes = 90,
- .writeback_line_buffer_buffer_size = 656640,
- .max_line_buffer_lines = 12,
- .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
- .writeback_chroma_buffer_size_kbytes = 8,
- .writeback_chroma_line_buffer_width_pixels = 4,
- .writeback_max_hscl_ratio = 1,
- .writeback_max_vscl_ratio = 1,
- .writeback_min_hscl_ratio = 1,
- .writeback_min_vscl_ratio = 1,
- .writeback_max_hscl_taps = 1,
- .writeback_max_vscl_taps = 1,
- .writeback_line_buffer_luma_buffer_size = 0,
- .writeback_line_buffer_chroma_buffer_size = 14643,
- .cursor_buffer_size = 8,
- .cursor_chunk_size = 2,
- .max_num_otg = 4,
- .max_num_dpp = 4,
- .max_num_wb = 1,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 6,
- .max_vscl_ratio = 6,
- .hscl_mults = 4,
- .vscl_mults = 4,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dispclk_ramp_margin_percent = 1,
- .underscan_factor = 1.11,
- .min_vblank_lines = 32,
- .dppclk_delay_subtotal = 46,
- .dynamic_metadata_vm_enabled = true,
- .dppclk_delay_scl_lb_only = 16,
- .dppclk_delay_scl = 50,
- .dppclk_delay_cnvc_formatter = 27,
- .dppclk_delay_cnvc_cursor = 6,
- .dispclk_delay_subtotal = 119,
- .dcfclk_cstate_latency = 5.2, // SRExitTime
- .max_inter_dcn_tile_repeaters = 8,
- .max_num_hdmi_frl_outputs = 0,
- .odm_combine_4to1_supported = true,
-
- .xfc_supported = false,
- .xfc_fill_bw_overhead_percent = 10.0,
- .xfc_fill_constant_bytes = 0,
- .gfx7_compat_tiling_supported = 0,
- .number_of_cursors = 1,
-};
-
-struct _vcs_dpi_soc_bounding_box_st dcn3_01_soc = {
- .clock_limits = {
- {
- .state = 0,
- .dram_speed_mts = 2400.0,
- .fabricclk_mhz = 600,
- .socclk_mhz = 278.0,
- .dcfclk_mhz = 400.0,
- .dscclk_mhz = 206.0,
- .dppclk_mhz = 1015.0,
- .dispclk_mhz = 1015.0,
- .phyclk_mhz = 600.0,
- },
- {
- .state = 1,
- .dram_speed_mts = 2400.0,
- .fabricclk_mhz = 688,
- .socclk_mhz = 278.0,
- .dcfclk_mhz = 400.0,
- .dscclk_mhz = 206.0,
- .dppclk_mhz = 1015.0,
- .dispclk_mhz = 1015.0,
- .phyclk_mhz = 600.0,
- },
- {
- .state = 2,
- .dram_speed_mts = 4267.0,
- .fabricclk_mhz = 1067,
- .socclk_mhz = 278.0,
- .dcfclk_mhz = 608.0,
- .dscclk_mhz = 296.0,
- .dppclk_mhz = 1015.0,
- .dispclk_mhz = 1015.0,
- .phyclk_mhz = 810.0,
- },
-
- {
- .state = 3,
- .dram_speed_mts = 4267.0,
- .fabricclk_mhz = 1067,
- .socclk_mhz = 715.0,
- .dcfclk_mhz = 676.0,
- .dscclk_mhz = 338.0,
- .dppclk_mhz = 1015.0,
- .dispclk_mhz = 1015.0,
- .phyclk_mhz = 810.0,
- },
-
- {
- .state = 4,
- .dram_speed_mts = 4267.0,
- .fabricclk_mhz = 1067,
- .socclk_mhz = 953.0,
- .dcfclk_mhz = 810.0,
- .dscclk_mhz = 338.0,
- .dppclk_mhz = 1015.0,
- .dispclk_mhz = 1015.0,
- .phyclk_mhz = 810.0,
- },
- },
-
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
- .urgent_latency_us = 4.0,
- .urgent_latency_pixel_data_only_us = 4.0,
- .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
- .urgent_latency_vm_data_only_us = 4.0,
- .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
- .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
- .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 75.0,
- .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
- .max_avg_sdp_bw_use_normal_percent = 60.0,
- .max_avg_dram_bw_use_normal_percent = 60.0,
- .writeback_latency_us = 12.0,
- .max_request_size_bytes = 256,
- .dram_channel_width_bytes = 4,
- .fabric_datapath_to_dcn_data_return_bytes = 32,
- .dcn_downspread_percent = 0.5,
- .downspread_percent = 0.38,
- .dram_page_open_time_ns = 50.0,
- .dram_rw_turnaround_time_ns = 17.5,
- .dram_return_buffer_per_channel_bytes = 8192,
- .round_trip_ping_latency_dcfclk_cycles = 191,
- .urgent_out_of_order_return_per_channel_bytes = 4096,
- .channel_interleave_bytes = 256,
- .num_banks = 8,
- .num_chans = 4,
- .gpuvm_min_page_size_bytes = 4096,
- .hostvm_min_page_size_bytes = 4096,
- .dram_clock_change_latency_us = 23.84,
- .writeback_dram_clock_change_latency_us = 23.0,
- .return_bus_width_bytes = 64,
- .dispclk_dppclk_vco_speed_mhz = 3550,
- .xfc_bus_transport_time_us = 20, // ?
- .xfc_xbuf_latency_tolerance_us = 4, // ?
- .use_urgent_burst_bw = 1, // ?
- .num_states = 5,
- .do_urgent_latency_adjustment = false,
- .urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
-};
-
enum dcn301_clk_src_array_id {
DCN301_CLK_SRC_PLL0,
DCN301_CLK_SRC_PLL1,
@@ -1480,8 +1303,6 @@ static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
-#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16)))
-#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
static bool is_soc_bounding_box_valid(struct dc *dc)
{
@@ -1508,26 +1329,24 @@ static bool init_soc_bounding_box(struct dc *dc,
loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
loaded_ip->max_num_dpp = pool->base.pipe_count;
+ DC_FP_START();
dcn20_patch_bounding_box(dc, loaded_bb);
+ DC_FP_END();
if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
struct bp_soc_bb_info bb_info = {0};
if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
- if (bb_info.dram_clock_change_latency_100ns > 0)
- dcn3_01_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10;
-
- if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
- dcn3_01_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10;
-
- if (bb_info.dram_sr_exit_latency_100ns > 0)
- dcn3_01_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
+ DC_FP_START();
+ dcn301_fpu_init_soc_bounding_box(bb_info);
+ DC_FP_END();
}
}
return true;
}
+
static void set_wm_ranges(
struct pp_smu_funcs *pp_smu,
struct _vcs_dpi_soc_bounding_box_st *loaded_bb)
@@ -1550,9 +1369,9 @@ static void set_wm_ranges(
ranges.reader_wm_sets[i].wm_inst = i;
ranges.reader_wm_sets[i].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
ranges.reader_wm_sets[i].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
- ranges.reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
- ranges.reader_wm_sets[i].max_fill_clk_mhz = loaded_bb->clock_limits[i].dram_speed_mts / 16;
-
+ DC_FP_START();
+ dcn301_fpu_set_wm_ranges(i, &ranges, loaded_bb);
+ DC_FP_END();
ranges.num_reader_wm_sets = i + 1;
}
@@ -1572,154 +1391,6 @@ static void set_wm_ranges(
pp_smu->nv_funcs.set_wm_ranges(&pp_smu->nv_funcs.pp_smu, &ranges);
}
-static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
-{
- struct dcn301_resource_pool *pool = TO_DCN301_RES_POOL(dc->res_pool);
- struct clk_limit_table *clk_table = &bw_params->clk_table;
- struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
- unsigned int i, closest_clk_lvl;
- int j;
-
- // Default clock levels are used for diags, which may lead to overclocking.
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
- dcn3_01_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
- dcn3_01_ip.max_num_dpp = pool->base.pipe_count;
- dcn3_01_soc.num_chans = bw_params->num_channels;
-
- ASSERT(clk_table->num_entries);
- for (i = 0; i < clk_table->num_entries; i++) {
- /* loop backwards*/
- for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) {
- if ((unsigned int) dcn3_01_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
- closest_clk_lvl = j;
- break;
- }
- }
-
- clock_limits[i].state = i;
- clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
-
- clock_limits[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
- clock_limits[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
- clock_limits[i].dram_bw_per_chan_gbps = dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- clock_limits[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- clock_limits[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- clock_limits[i].phyclk_d18_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- clock_limits[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
- }
- for (i = 0; i < clk_table->num_entries; i++)
- dcn3_01_soc.clock_limits[i] = clock_limits[i];
- if (clk_table->num_entries) {
- dcn3_01_soc.num_states = clk_table->num_entries;
- /* duplicate last level */
- dcn3_01_soc.clock_limits[dcn3_01_soc.num_states] = dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1];
- dcn3_01_soc.clock_limits[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states;
- }
- }
-
- dcn3_01_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
- dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
-
- dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
-}
-
-static void calculate_wm_set_for_vlevel(
- int vlevel,
- struct wm_range_table_entry *table_entry,
- struct dcn_watermarks *wm_set,
- struct display_mode_lib *dml,
- display_e2e_pipe_params_st *pipes,
- int pipe_cnt)
-{
- double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;
-
- ASSERT(vlevel < dml->soc.num_states);
- /* only pipe 0 is read for voltage and dcf/soc clocks */
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
- pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
-
- dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
- dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
- dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
-
- wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
- wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
- wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
- wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
- wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
- wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
- wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
- wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
- dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
-
-}
-
-static void dcn301_calculate_wm_and_dlg(
- struct dc *dc, struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int pipe_cnt,
- int vlevel_req)
-{
- int i, pipe_idx;
- int vlevel, vlevel_max;
- struct wm_range_table_entry *table_entry;
- struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;
-
- ASSERT(bw_params);
-
- vlevel_max = bw_params->clk_table.num_entries - 1;
-
- /* WM Set D */
- table_entry = &bw_params->wm_table.entries[WM_D];
- if (table_entry->wm_type == WM_TYPE_RETRAINING)
- vlevel = 0;
- else
- vlevel = vlevel_max;
- calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
- &context->bw_ctx.dml, pipes, pipe_cnt);
- /* WM Set C */
- table_entry = &bw_params->wm_table.entries[WM_C];
- vlevel = min(max(vlevel_req, 2), vlevel_max);
- calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
- &context->bw_ctx.dml, pipes, pipe_cnt);
- /* WM Set B */
- table_entry = &bw_params->wm_table.entries[WM_B];
- vlevel = min(max(vlevel_req, 1), vlevel_max);
- calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
- &context->bw_ctx.dml, pipes, pipe_cnt);
-
- /* WM Set A */
- table_entry = &bw_params->wm_table.entries[WM_A];
- vlevel = min(vlevel_req, vlevel_max);
- calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
- &context->bw_ctx.dml, pipes, pipe_cnt);
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
- pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
-
- if (dc->config.forced_clocks) {
- pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
- pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
- }
- if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
- if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
-
- pipe_idx++;
- }
-
- dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
-}
-
static struct resource_funcs dcn301_res_pool_funcs = {
.destroy = dcn301_destroy_resource_pool,
.link_enc_create = dcn301_link_encoder_create,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h
index 17e4e91ff4b8..ae8672680cdd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h
@@ -32,6 +32,9 @@ struct dc;
struct resource_pool;
struct _vcs_dpi_display_pipe_params_st;
+extern struct _vcs_dpi_ip_params_st dcn3_01_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_01_soc;
+
struct dcn301_resource_pool {
struct resource_pool base;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 2ce6eae7535d..4a9b64023675 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -1344,6 +1344,20 @@ void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn3_03_soc.clock_limits[i].phyclk_d18_mhz = dcn3_03_soc.clock_limits[0].phyclk_d18_mhz;
dcn3_03_soc.clock_limits[i].dscclk_mhz = dcn3_03_soc.clock_limits[0].dscclk_mhz;
}
+
+ // WA: patch strobe modes to compensate for DCN303 BW issue
+ if (dcn3_03_soc.num_chans <= 4) {
+ for (i = 0; i < dcn3_03_soc.num_states; i++) {
+ if (dcn3_03_soc.clock_limits[i].dram_speed_mts > 1700)
+ break;
+
+ if (dcn3_03_soc.clock_limits[i].dram_speed_mts >= 1500) {
+ dcn3_03_soc.clock_limits[i].dcfclk_mhz = 100;
+ dcn3_03_soc.clock_limits[i].fabricclk_mhz = 100;
+ }
+ }
+ }
+
/* re-init DML with updated bb */
dml_init_instance(&dc->dml, &dcn3_03_soc, &dcn3_03_ip, DML_PROJECT_DCN30);
if (dc->current_state)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c
index 6bd7a0626665..de5e18c2a3ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c
@@ -129,7 +129,7 @@ static void apg31_se_audio_setup(
/* When running in "pair mode", pairs of audio channels have their own enable
* this is for really old audio drivers */
- REG_UPDATE(APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, 0xF);
+ REG_UPDATE(APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, 0xFF);
// REG_UPDATE(APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, channels);
/* Disable forced mem power off */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
index 9896adf67425..815481a3ef54 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
@@ -26,6 +26,7 @@
#include "reg_helper.h"
#include "core_types.h"
#include "dcn31_dccg.h"
+#include "dal_asic_id.h"
#define TO_DCN_DCCG(dccg)\
container_of(dccg, struct dcn_dccg, base)
@@ -42,10 +43,58 @@
#define DC_LOGGER \
dccg->ctx->logger
-void dccg31_set_dpstreamclk(
- struct dccg *dccg,
- enum hdmistreamclk_source src,
- int otg_inst)
+static void dccg31_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (dccg->ref_dppclk && req_dppclk) {
+ int ref_dppclk = dccg->ref_dppclk;
+ int modulo, phase;
+
+ // phase / modulo = dpp pipe clk / dpp global clk
+ modulo = 0xff; // use FF at the end
+ phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk;
+
+ if (phase > 0xff) {
+ ASSERT(false);
+ phase = 0xff;
+ }
+
+ REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
+ DPPCLK0_DTO_PHASE, phase,
+ DPPCLK0_DTO_MODULO, modulo);
+ REG_UPDATE(DPPCLK_DTO_CTRL,
+ DPPCLK_DTO_ENABLE[dpp_inst], 1);
+ } else {
+ //DTO must be enabled to generate a 0Hz clock output
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) {
+ REG_UPDATE(DPPCLK_DTO_CTRL,
+ DPPCLK_DTO_ENABLE[dpp_inst], 1);
+ REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
+ DPPCLK0_DTO_PHASE, 0,
+ DPPCLK0_DTO_MODULO, 1);
+ } else {
+ REG_UPDATE(DPPCLK_DTO_CTRL,
+ DPPCLK_DTO_ENABLE[dpp_inst], 0);
+ }
+ }
+ dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
+}
+
+static enum phyd32clk_clock_source get_phy_mux_symclk(
+ struct dcn_dccg *dccg_dcn,
+ enum phyd32clk_clock_source src)
+{
+ if (dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+ if (src == PHYD32CLKC)
+ src = PHYD32CLKF;
+ if (src == PHYD32CLKD)
+ src = PHYD32CLKG;
+ }
+ return src;
+}
+
+static void dccg31_enable_dpstreamclk(struct dccg *dccg, int otg_inst)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
@@ -53,19 +102,53 @@ void dccg31_set_dpstreamclk(
switch (otg_inst) {
case 0:
REG_UPDATE(DPSTREAMCLK_CNTL,
- DPSTREAMCLK_PIPE0_EN, (src == REFCLK) ? 0 : 1);
+ DPSTREAMCLK_PIPE0_EN, 1);
+ break;
+ case 1:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE1_EN, 1);
+ break;
+ case 2:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE2_EN, 1);
+ break;
+ case 3:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE3_EN, 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ DPSTREAMCLK_ROOT_GATE_DISABLE, 1);
+}
+
+static void dccg31_disable_dpstreamclk(struct dccg *dccg, int otg_inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ DPSTREAMCLK_ROOT_GATE_DISABLE, 0);
+
+ switch (otg_inst) {
+ case 0:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE0_EN, 0);
break;
case 1:
REG_UPDATE(DPSTREAMCLK_CNTL,
- DPSTREAMCLK_PIPE1_EN, (src == REFCLK) ? 0 : 1);
+ DPSTREAMCLK_PIPE1_EN, 0);
break;
case 2:
REG_UPDATE(DPSTREAMCLK_CNTL,
- DPSTREAMCLK_PIPE2_EN, (src == REFCLK) ? 0 : 1);
+ DPSTREAMCLK_PIPE2_EN, 0);
break;
case 3:
REG_UPDATE(DPSTREAMCLK_CNTL,
- DPSTREAMCLK_PIPE3_EN, (src == REFCLK) ? 0 : 1);
+ DPSTREAMCLK_PIPE3_EN, 0);
break;
default:
BREAK_TO_DEBUGGER();
@@ -73,6 +156,17 @@ void dccg31_set_dpstreamclk(
}
}
+void dccg31_set_dpstreamclk(
+ struct dccg *dccg,
+ enum hdmistreamclk_source src,
+ int otg_inst)
+{
+ if (src == REFCLK)
+ dccg31_disable_dpstreamclk(dccg, otg_inst);
+ else
+ dccg31_enable_dpstreamclk(dccg, otg_inst);
+}
+
void dccg31_enable_symclk32_se(
struct dccg *dccg,
int hpo_se_inst,
@@ -80,24 +174,38 @@ void dccg31_enable_symclk32_se(
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ phyd32clk = get_phy_mux_symclk(dccg_dcn, phyd32clk);
+
/* select one of the PHYD32CLKs as the source for symclk32_se */
switch (hpo_se_inst) {
case 0:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_SE0_GATE_DISABLE, 1);
REG_UPDATE_2(SYMCLK32_SE_CNTL,
SYMCLK32_SE0_SRC_SEL, phyd32clk,
SYMCLK32_SE0_EN, 1);
break;
case 1:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_SE1_GATE_DISABLE, 1);
REG_UPDATE_2(SYMCLK32_SE_CNTL,
SYMCLK32_SE1_SRC_SEL, phyd32clk,
SYMCLK32_SE1_EN, 1);
break;
case 2:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_SE2_GATE_DISABLE, 1);
REG_UPDATE_2(SYMCLK32_SE_CNTL,
SYMCLK32_SE2_SRC_SEL, phyd32clk,
SYMCLK32_SE2_EN, 1);
break;
case 3:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_SE3_GATE_DISABLE, 1);
REG_UPDATE_2(SYMCLK32_SE_CNTL,
SYMCLK32_SE3_SRC_SEL, phyd32clk,
SYMCLK32_SE3_EN, 1);
@@ -120,21 +228,33 @@ void dccg31_disable_symclk32_se(
REG_UPDATE_2(SYMCLK32_SE_CNTL,
SYMCLK32_SE0_SRC_SEL, 0,
SYMCLK32_SE0_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_SE0_GATE_DISABLE, 0);
break;
case 1:
REG_UPDATE_2(SYMCLK32_SE_CNTL,
SYMCLK32_SE1_SRC_SEL, 0,
SYMCLK32_SE1_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_SE1_GATE_DISABLE, 0);
break;
case 2:
REG_UPDATE_2(SYMCLK32_SE_CNTL,
SYMCLK32_SE2_SRC_SEL, 0,
SYMCLK32_SE2_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_SE2_GATE_DISABLE, 0);
break;
case 3:
REG_UPDATE_2(SYMCLK32_SE_CNTL,
SYMCLK32_SE3_SRC_SEL, 0,
SYMCLK32_SE3_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_SE3_GATE_DISABLE, 0);
break;
default:
BREAK_TO_DEBUGGER();
@@ -149,14 +269,22 @@ void dccg31_enable_symclk32_le(
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ phyd32clk = get_phy_mux_symclk(dccg_dcn, phyd32clk);
+
/* select one of the PHYD32CLKs as the source for symclk32_le */
switch (hpo_le_inst) {
case 0:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_LE0_GATE_DISABLE, 1);
REG_UPDATE_2(SYMCLK32_LE_CNTL,
SYMCLK32_LE0_SRC_SEL, phyd32clk,
SYMCLK32_LE0_EN, 1);
break;
case 1:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_LE1_GATE_DISABLE, 1);
REG_UPDATE_2(SYMCLK32_LE_CNTL,
SYMCLK32_LE1_SRC_SEL, phyd32clk,
SYMCLK32_LE1_EN, 1);
@@ -179,11 +307,87 @@ void dccg31_disable_symclk32_le(
REG_UPDATE_2(SYMCLK32_LE_CNTL,
SYMCLK32_LE0_SRC_SEL, 0,
SYMCLK32_LE0_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_LE0_GATE_DISABLE, 0);
break;
case 1:
REG_UPDATE_2(SYMCLK32_LE_CNTL,
SYMCLK32_LE1_SRC_SEL, 0,
SYMCLK32_LE1_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_LE1_GATE_DISABLE, 0);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg31_disable_dscclk(struct dccg *dccg, int inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ return;
+ //DTO must be enabled to generate a 0 Hz clock output
+ switch (inst) {
+ case 0:
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK0_DTO_ENABLE, 1);
+ REG_UPDATE_2(DSCCLK0_DTO_PARAM,
+ DSCCLK0_DTO_PHASE, 0,
+ DSCCLK0_DTO_MODULO, 1);
+ break;
+ case 1:
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK1_DTO_ENABLE, 1);
+ REG_UPDATE_2(DSCCLK1_DTO_PARAM,
+ DSCCLK1_DTO_PHASE, 0,
+ DSCCLK1_DTO_MODULO, 1);
+ break;
+ case 2:
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK2_DTO_ENABLE, 1);
+ REG_UPDATE_2(DSCCLK2_DTO_PARAM,
+ DSCCLK2_DTO_PHASE, 0,
+ DSCCLK2_DTO_MODULO, 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg31_enable_dscclk(struct dccg *dccg, int inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ return;
+ //Disable DTO
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(DSCCLK0_DTO_PARAM,
+ DSCCLK0_DTO_PHASE, 0,
+ DSCCLK0_DTO_MODULO, 0);
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK0_DTO_ENABLE, 0);
+ break;
+ case 1:
+ REG_UPDATE_2(DSCCLK1_DTO_PARAM,
+ DSCCLK1_DTO_PHASE, 0,
+ DSCCLK1_DTO_MODULO, 0);
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK1_DTO_ENABLE, 0);
+ break;
+ case 2:
+ REG_UPDATE_2(DSCCLK2_DTO_PARAM,
+ DSCCLK2_DTO_PHASE, 0,
+ DSCCLK2_DTO_MODULO, 0);
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK2_DTO_ENABLE, 0);
break;
default:
BREAK_TO_DEBUGGER();
@@ -398,10 +602,23 @@ void dccg31_init(struct dccg *dccg)
dccg31_disable_symclk32_se(dccg, 1);
dccg31_disable_symclk32_se(dccg, 2);
dccg31_disable_symclk32_se(dccg, 3);
+
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) {
+ dccg31_disable_symclk32_le(dccg, 0);
+ dccg31_disable_symclk32_le(dccg, 1);
+ }
+
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) {
+ dccg31_disable_dpstreamclk(dccg, 0);
+ dccg31_disable_dpstreamclk(dccg, 1);
+ dccg31_disable_dpstreamclk(dccg, 2);
+ dccg31_disable_dpstreamclk(dccg, 3);
+ }
+
}
static const struct dccg_funcs dccg31_funcs = {
- .update_dpp_dto = dccg2_update_dpp_dto,
+ .update_dpp_dto = dccg31_update_dpp_dto,
.get_dccg_ref_freq = dccg31_get_dccg_ref_freq,
.dccg_init = dccg31_init,
.set_dpstreamclk = dccg31_set_dpstreamclk,
@@ -413,6 +630,8 @@ static const struct dccg_funcs dccg31_funcs = {
.set_dtbclk_dto = dccg31_set_dtbclk_dto,
.set_audio_dtbclk_dto = dccg31_set_audio_dtbclk_dto,
.set_dispclk_change_mode = dccg31_set_dispclk_change_mode,
+ .disable_dsc = dccg31_disable_dscclk,
+ .enable_dsc = dccg31_enable_dscclk,
};
struct dccg *dccg31_create(
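The DPP DTO programming in dccg31_update_dpp_dto() above computes a phase/modulo pair so that out_clk = ref_clk * phase / modulo, rounding phase up so the pipe clock never falls below the requested rate; the same DTO is also left enabled with phase 0 and modulo 1 to generate a 0 Hz output when a DPP or DSC instance is parked. A small standalone example of the arithmetic, not part of the patch, with illustrative clock values:

#include <stdio.h>

/* phase / modulo = requested clock / reference clock, phase rounded up */
static unsigned int dto_phase(unsigned int ref_khz, unsigned int req_khz)
{
	const unsigned int modulo = 0xff;
	unsigned int phase = (modulo * req_khz + ref_khz - 1) / ref_khz;

	if (phase > modulo)
		phase = modulo;	/* output cannot exceed the reference clock */
	return phase;
}

int main(void)
{
	unsigned int ref_khz = 1000000;	/* 1000 MHz reference (illustrative) */
	unsigned int req_khz = 600000;	/* 600 MHz requested  (illustrative) */
	unsigned int phase = dto_phase(ref_khz, req_khz);

	/* 255 * 600000 rounds up to phase 153 -> 1000000 * 153 / 255 = 600000 kHz */
	printf("phase=%u out=%u kHz\n", phase, ref_khz * phase / 0xff);
	return 0;
}

A phase of 0 with a nonzero modulo gives the 0 Hz output mentioned in the comments, which is why the disable paths keep the DTO enabled instead of bypassing it to the full reference clock.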
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
index 1e5aabcb7799..a013a32bbaf7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
@@ -61,7 +61,13 @@
SR(DCCG_AUDIO_DTBCLK_DTO_MODULO),\
SR(DCCG_AUDIO_DTBCLK_DTO_PHASE),\
SR(DCCG_AUDIO_DTO_SOURCE),\
- SR(DENTIST_DISPCLK_CNTL)
+ SR(DENTIST_DISPCLK_CNTL),\
+ SR(DSCCLK0_DTO_PARAM),\
+ SR(DSCCLK1_DTO_PARAM),\
+ SR(DSCCLK2_DTO_PARAM),\
+ SR(DSCCLK_DTO_CTRL),\
+ SR(DCCG_GATE_DISABLE_CNTL3),\
+ SR(HDMISTREAMCLK0_DTO_PARAM)
#define DCCG_MASK_SH_LIST_DCN31(mask_sh) \
@@ -119,7 +125,26 @@
DCCG_SFII(OTG, PIXEL_RATE_CNTL, DTBCLK_DTO, DIV, 3, mask_sh),\
DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\
DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\
- DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_MODE, mask_sh)
+ DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_MODE, mask_sh), \
+ DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_PHASE, mask_sh),\
+ DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_MODULO, mask_sh),\
+ DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_PHASE, mask_sh),\
+ DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_MODULO, mask_sh),\
+ DCCG_SF(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_PHASE, mask_sh),\
+ DCCG_SF(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_MODULO, mask_sh),\
+ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK0_DTO_ENABLE, mask_sh),\
+ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK1_DTO_ENABLE, mask_sh),\
+ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK2_DTO_ENABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, DPSTREAMCLK_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, DPSTREAMCLK_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE2_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE3_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_PHASE, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_MODULO, mask_sh)
struct dccg *dccg31_create(
@@ -130,6 +155,11 @@ struct dccg *dccg31_create(
void dccg31_init(struct dccg *dccg);
+void dccg31_set_dpstreamclk(
+ struct dccg *dccg,
+ enum hdmistreamclk_source src,
+ int otg_inst);
+
void dccg31_enable_symclk32_se(
struct dccg *dccg,
int hpo_se_inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index 8f8eee475144..ee6f13bef377 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -431,7 +431,7 @@ void dcn31_link_encoder_enable_dp_output(
if (link) {
dpia_control.dpia_id = link->ddc_hw_inst;
- dpia_control.fec_rdy = link->fec_state == dc_link_fec_ready ? 1 : 0;
+ dpia_control.fec_rdy = dc_link_should_enable_fec(link);
} else {
DC_LOG_ERROR("%s: Failed to execute DPIA enable DMUB command.\n", __func__);
BREAK_TO_DEBUGGER();
@@ -476,7 +476,7 @@ void dcn31_link_encoder_enable_dp_mst_output(
if (link) {
dpia_control.dpia_id = link->ddc_hw_inst;
- dpia_control.fec_rdy = link->fec_state == dc_link_fec_ready ? 1 : 0;
+ dpia_control.fec_rdy = dc_link_should_enable_fec(link);
} else {
DC_LOG_ERROR("%s: Failed to execute DPIA enable DMUB command.\n", __func__);
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 18e33ef3d217..5dd1ce9ddb53 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -49,6 +49,8 @@
#include "inc/link_dpcd.h"
#include "dcn10/dcn10_hw_sequencer.h"
#include "inc/link_enc_cfg.h"
+#include "dcn30/dcn30_vpg.h"
+#include "dce/dce_i2c_hw.h"
#define DC_LOGGER_INIT(logger)
@@ -64,6 +66,45 @@
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
+static void enable_memory_low_power(struct dc *dc)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ int i;
+
+ if (dc->debug.enable_mem_low_power.bits.dmcu) {
+ // Force ERAM to shutdown if DMCU is not enabled
+ if (dc->debug.disable_dmcu || dc->config.disable_dmcu) {
+ REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3);
+ }
+ }
+
+ // Set default OPTC memory power states
+ if (dc->debug.enable_mem_low_power.bits.optc) {
+ // Shutdown when unassigned and light sleep in VBLANK
+ REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
+ }
+
+ if (dc->debug.enable_mem_low_power.bits.vga) {
+ // Power down VGA memory
+ REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
+ }
+
+ if (dc->debug.enable_mem_low_power.bits.mpc)
+ dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode(dc->res_pool->mpc);
+
+
+ if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) {
+ // Power down VPGs
+ for (i = 0; i < dc->res_pool->stream_enc_count; i++)
+ dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
+ dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
+#endif
+ }
+
+}
+
void dcn31_init_hw(struct dc *dc)
{
struct abm **abms = dc->res_pool->multiple_abms;
@@ -71,16 +112,11 @@ void dcn31_init_hw(struct dc *dc)
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
- int i;
- int edp_num;
+ int i, j;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
- // Initialize the dccg
- if (res_pool->dccg->funcs->dccg_init)
- res_pool->dccg->funcs->dccg_init(res_pool->dccg);
-
if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
REG_WRITE(REFCLK_CNTL, 0);
@@ -107,24 +143,11 @@ void dcn31_init_hw(struct dc *dc)
hws->funcs.bios_golden_init(dc);
hws->funcs.disable_vga(dc->hwseq);
}
+ // Initialize the dccg
+ if (res_pool->dccg->funcs->dccg_init)
+ res_pool->dccg->funcs->dccg_init(res_pool->dccg);
- if (dc->debug.enable_mem_low_power.bits.dmcu) {
- // Force ERAM to shutdown if DMCU is not enabled
- if (dc->debug.disable_dmcu || dc->config.disable_dmcu) {
- REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3);
- }
- }
-
- // Set default OPTC memory power states
- if (dc->debug.enable_mem_low_power.bits.optc) {
- // Shutdown when unassigned and light sleep in VBLANK
- REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
- }
-
- if (dc->debug.enable_mem_low_power.bits.vga) {
- // Power down VGA memory
- REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
- }
+ enable_memory_low_power(dc);
if (dc->ctx->dc_bios->fw_info_valid) {
res_pool->ref_clocks.xtalin_clock_inKhz =
@@ -179,9 +202,40 @@ void dcn31_init_hw(struct dc *dc)
dmub_enable_outbox_notification(dc);
/* we want to turn off all dp displays before doing detection */
- if (dc->config.power_down_display_on_boot)
- blank_all_dp_displays(dc, true);
-
+ if (dc->config.power_down_display_on_boot) {
+ uint8_t dpcd_power_state = '\0';
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
+ continue;
+
+ /* if any of the displays are lit up turn them off */
+ status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+ if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
+ /* blank dp stream before power off receiver*/
+ if (dc->links[i]->ep_type == DISPLAY_ENDPOINT_PHY &&
+ dc->links[i]->link_enc->funcs->get_dig_frontend) {
+ unsigned int fe;
+
+ fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
+ dc->links[i]->link_enc);
+ if (fe == ENGINE_ID_UNKNOWN)
+ continue;
+
+ for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+ if (fe == dc->res_pool->stream_enc[j]->id) {
+ dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
+ dc->res_pool->stream_enc[j]);
+ break;
+ }
+ }
+ }
+ dp_receiver_power_ctrl(dc->links[i], false);
+ }
+ }
+ }
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
@@ -196,48 +250,6 @@ void dcn31_init_hw(struct dc *dc)
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
- /* In headless boot cases, DIG may be turned
- * on which causes HW/SW discrepancies.
- * To avoid this, power down hardware on boot
- * if DIG is turned on and seamless boot not enabled
- */
- if (dc->config.power_down_display_on_boot) {
- struct dc_link *edp_links[MAX_NUM_EDP];
- struct dc_link *edp_link;
- bool power_down = false;
-
- get_edp_links(dc, edp_links, &edp_num);
- if (edp_num) {
- for (i = 0; i < edp_num; i++) {
- edp_link = edp_links[i];
- if (edp_link->link_enc->funcs->is_dig_enabled &&
- edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
- dc->hwss.edp_backlight_control &&
- dc->hwss.power_down &&
- dc->hwss.edp_power_control) {
- dc->hwss.edp_backlight_control(edp_link, false);
- dc->hwss.power_down(dc);
- dc->hwss.edp_power_control(edp_link, false);
- power_down = true;
- }
- }
- }
- if (!power_down) {
- for (i = 0; i < dc->link_count; i++) {
- struct dc_link *link = dc->links[i];
-
- if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
- link->link_enc->funcs->is_dig_enabled &&
- link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
- dc->hwss.power_down) {
- dc->hwss.power_down(dc);
- break;
- }
-
- }
- }
- }
-
for (i = 0; i < res_pool->audio_count; i++) {
struct audio *audio = res_pool->audios[i];
@@ -259,6 +271,13 @@ void dcn31_init_hw(struct dc *dc)
/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
REG_WRITE(DIO_MEM_PWR_CTRL, 0);
+ // Set i2c to light sleep until engine is setup
+ if (dc->debug.enable_mem_low_power.bits.i2c)
+ REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 1);
+
+ if (hws->funcs.setup_hpo_hw_control)
+ hws->funcs.setup_hpo_hw_control(hws, false);
+
if (!dc->debug.disable_clock_gate) {
/* enable all DCN clock gating */
REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
@@ -300,6 +319,12 @@ void dcn31_dsc_pg_control(
if (hws->ctx->dc->debug.disable_dsc_power_gate)
return;
+ if (hws->ctx->dc->debug.root_clock_optimization.bits.dsc &&
+ hws->ctx->dc->res_pool->dccg->funcs->enable_dsc &&
+ power_on)
+ hws->ctx->dc->res_pool->dccg->funcs->enable_dsc(
+ hws->ctx->dc->res_pool->dccg, dsc_inst);
+
REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
@@ -336,6 +361,13 @@ void dcn31_dsc_pg_control(
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
+
+ if (hws->ctx->dc->debug.root_clock_optimization.bits.dsc) {
+ if (hws->ctx->dc->res_pool->dccg->funcs->disable_dsc && !power_on)
+ hws->ctx->dc->res_pool->dccg->funcs->disable_dsc(
+ hws->ctx->dc->res_pool->dccg, dsc_inst);
+ }
+
}
@@ -579,3 +611,9 @@ void dcn31_reset_hw_ctx_wrap(
/* New dc_state in the process of being applied to hardware. */
dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_TRANSIENT;
}
+
+void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
+{
+ if (hws->ctx->dc->debug.hpo_optimization)
+ REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
index 7ae45dd202d9..edfc01d6ad73 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
@@ -54,5 +54,6 @@ void dcn31_reset_hw_ctx_wrap(
bool dcn31_is_abm_supported(struct dc *dc,
struct dc_state *context, struct dc_stream_state *stream);
void dcn31_init_pipes(struct dc *dc, struct dc_state *context);
+void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable);
#endif /* __DC_HWSS_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index c6a737781ad1..05335a8c3c2d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -137,6 +137,7 @@ static const struct hwseq_private_funcs dcn31_private_funcs = {
.dccg_init = dcn20_dccg_init,
.set_blend_lut = dcn30_set_blend_lut,
.set_shaper_3dlut = dcn20_set_shaper_3dlut,
+ .setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
};
void dcn31_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 7cb7604a35eb..18896294ae12 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -101,8 +101,6 @@
#include "link_enc_cfg.h"
#define DC_LOGGER_INIT(logger)
-#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16)))
-#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
#define DCN3_1_DEFAULT_DET_SIZE 384
@@ -222,8 +220,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
.num_states = 5,
.sr_exit_time_us = 9.0,
.sr_enter_plus_exit_time_us = 11.0,
- .sr_exit_z8_time_us = 402.0,
- .sr_enter_plus_exit_z8_time_us = 520.0,
+ .sr_exit_z8_time_us = 442.0,
+ .sr_enter_plus_exit_z8_time_us = 560.0,
.writeback_latency_us = 12.0,
.dram_channel_width_bytes = 4,
.round_trip_ping_latency_dcfclk_cycles = 106,
@@ -862,7 +860,8 @@ static const struct dccg_mask dccg_mask = {
SR(D6VGA_CONTROL), \
SR(DC_IP_REQUEST_CNTL), \
SR(AZALIA_AUDIO_DTO), \
- SR(AZALIA_CONTROLLER_CLOCK_GATING)
+ SR(AZALIA_CONTROLLER_CLOCK_GATING), \
+ SR(HPO_TOP_HW_CONTROL)
static const struct dce_hwseq_registers hwseq_reg = {
HWSEQ_DCN31_REG_LIST()
@@ -899,7 +898,9 @@ static const struct dce_hwseq_registers hwseq_reg = {
HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \
HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \
HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \
- HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh)
+ HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh), \
+ HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \
+ HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh)
static const struct dce_hwseq_shift hwseq_shift = {
HWSEQ_DCN31_MASK_SH_LIST(__SHIFT)
@@ -998,7 +999,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
- .max_downscale_src_width = 3840,/*upto 4K*/
+ .max_downscale_src_width = 4096,/*upto true 4K*/
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
.sanity_checks = false,
@@ -1312,10 +1313,6 @@ static struct vpg *dcn31_vpg_create(
&vpg_shift,
&vpg_mask);
- // Will re-enable hw block when we enable stream
- // Check for enabled stream before powering down?
- vpg31_powerdown(&vpg31->base);
-
return &vpg31->base;
}
@@ -1383,6 +1380,12 @@ static struct stream_encoder *dcn31_stream_encoder_create(
return NULL;
}
+ if (ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
+ ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+ if ((eng_id == ENGINE_ID_DIGC) || (eng_id == ENGINE_ID_DIGD))
+ eng_id = eng_id + 3; // For B0 only. C->F, D->G.
+ }
+
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
&stream_enc_regs[eng_id],
@@ -1782,6 +1785,13 @@ static int dcn31_populate_dml_pipes_from_context(
pipe = &res_ctx->pipe_ctx[i];
timing = &pipe->stream->timing;
+ /*
+ * Immediate flip can be set dynamically after enabling the plane.
+ * We need to require support for immediate flip or underflow can be
+ * intermittently experienced depending on peak b/w requirements.
+ */
+ pipes[pipe_cnt].pipe.src.immediate_flip = true;
+
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
pipes[pipe_cnt].pipe.src.gpuvm = true;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
@@ -1824,7 +1834,7 @@ static int dcn31_populate_dml_pipes_from_context(
return pipe_cnt;
}
-static void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
+void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
{
if (dc->clk_mgr->bw_params->wm_table.entries[WM_A].valid) {
context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.entries[WM_A].pstate_latency_us;
@@ -1968,7 +1978,7 @@ static void dcn31_calculate_wm_and_dlg_fp(
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
}
-static void dcn31_calculate_wm_and_dlg(
+void dcn31_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
@@ -2450,6 +2460,8 @@ static bool dcn31_resource_construct(
dc->cap_funcs = cap_funcs;
+ dc->dcn_ip->max_num_dpp = dcn3_1_ip.max_num_dpp;
+
DC_FP_END();
return true;
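One assumption in the stream encoder hunk above is worth spelling out: the Yellow Carp B0 remap (eng_id + 3, "C->F, D->G") only works because the DIG engine IDs are consecutive. The enum below is a standalone illustration of that layout, not the values from the real dc engine ID header.

/* Standalone illustration of the B0 remap assumption: with consecutive
 * DIG engine IDs, adding 3 maps DIGC->DIGF and DIGD->DIGG.
 */
enum example_engine_id {
	EX_ENGINE_ID_DIGA,
	EX_ENGINE_ID_DIGB,
	EX_ENGINE_ID_DIGC,
	EX_ENGINE_ID_DIGD,
	EX_ENGINE_ID_DIGE,
	EX_ENGINE_ID_DIGF,
	EX_ENGINE_ID_DIGG,
};

static enum example_engine_id example_remap_for_b0(enum example_engine_id id)
{
	if (id == EX_ENGINE_ID_DIGC || id == EX_ENGINE_ID_DIGD)
		return id + 3;	/* C->F, D->G */
	return id;
}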
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
index 93571c976996..416fe7a721d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
@@ -35,6 +35,16 @@ struct dcn31_resource_pool {
struct resource_pool base;
};
+bool dcn31_validate_bandwidth(struct dc *dc,
+ struct dc_state *context,
+ bool fast_validate);
+void dcn31_calculate_wm_and_dlg(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel);
+void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
+
struct resource_pool *dcn31_create_resource_pool(
const struct dc_init_data *init_data,
struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
index 43f33e186088..511f9e1159c7 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
@@ -35,6 +35,8 @@ struct cp_psp_stream_config {
uint8_t link_enc_idx;
uint8_t stream_enc_idx;
uint8_t phy_idx;
+ uint8_t dio_output_idx;
+ uint8_t dio_output_type;
uint8_t assr_enabled;
uint8_t mst_enabled;
uint8_t dp2_enabled;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 169a4e68f86e..eee6672bd32d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -70,6 +70,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn2x/dcn2x.o := $(dml_rcflags)
@@ -83,7 +85,9 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_rcflags)
endif
CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags)
@@ -99,6 +103,8 @@ DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
DML += dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o
+DML += dcn301/dcn301_fpu.o
+DML += dsc/rc_calc_fpu.o
endif
AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML))
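These Makefile hunks give the relocated DML FPU objects the same treatment as the other dml files: dml_ccflags enables hard-float/SSE code generation and the CFLAGS_REMOVE lines strip the kernel's default no-FP flags, because dcn301_fpu.c and rc_calc_fpu.c use float and double directly. At run time those files assert they are inside an FPU-protected region via dc_assert_fp_enabled(); a hedged sketch of the expected caller-side pattern follows, using the DC_FP_START()/DC_FP_END() wrappers dc uses for this purpose (DC_FP_END() is visible in the dcn31_resource.c hunk above).

/* Sketch of the expected caller-side pattern for the FPU helpers added
 * here: bracket the call with the kernel-FPU wrappers so the
 * dc_assert_fp_enabled() check inside the helper passes.
 */
static void example_update_bounding_box(struct dc *dc,
					struct clk_bw_params *bw_params)
{
	DC_FP_START();
	dcn301_update_bw_bounding_box(dc, bw_params);
	DC_FP_END();
}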
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index e3d9f1decdfc..f47d82da115c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -3576,16 +3576,9 @@ static double TruncToValidBPP(
MinDSCBPP = 8;
MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16;
} else {
- if (Output == dm_hdmi) {
- NonDSCBPP0 = 24;
- NonDSCBPP1 = 24;
- NonDSCBPP2 = 24;
- }
- else {
- NonDSCBPP0 = 16;
- NonDSCBPP1 = 20;
- NonDSCBPP2 = 24;
- }
+ NonDSCBPP0 = 16;
+ NonDSCBPP1 = 20;
+ NonDSCBPP2 = 24;
if (Format == dm_n422) {
MinDSCBPP = 7;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
new file mode 100644
index 000000000000..94c32832a0e7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
@@ -0,0 +1,390 @@
+/*
+ * Copyright 2019-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "resource.h"
+#include "clk_mgr.h"
+#include "dcn20/dcn20_resource.h"
+#include "dcn301/dcn301_resource.h"
+
+#include "dml/dcn20/dcn20_fpu.h"
+#include "dcn301_fpu.h"
+
+#define TO_DCN301_RES_POOL(pool)\
+ container_of(pool, struct dcn301_resource_pool, base)
+
+/* Based on: //vidip/dc/dcn3/doc/architecture/DCN3x_Display_Mode.xlsm#83 */
+struct _vcs_dpi_ip_params_st dcn3_01_ip = {
+ .odm_capable = 1,
+ .gpuvm_enable = 1,
+ .hostvm_enable = 1,
+ .gpuvm_max_page_table_levels = 1,
+ .hostvm_max_page_table_levels = 2,
+ .hostvm_cached_page_table_levels = 0,
+ .pte_group_size_bytes = 2048,
+ .num_dsc = 3,
+ .rob_buffer_size_kbytes = 184,
+ .det_buffer_size_kbytes = 184,
+ .dpte_buffer_size_in_pte_reqs_luma = 64,
+ .dpte_buffer_size_in_pte_reqs_chroma = 32,
+ .pde_proc_buffer_size_64k_reqs = 48,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .meta_chunk_size_kbytes = 2,
+ .writeback_chunk_size_kbytes = 8,
+ .line_buffer_size_bits = 789504,
+ .is_line_buffer_bpp_fixed = 0, // ?
+ .line_buffer_fixed_bpp = 48, // ?
+ .dcc_supported = true,
+ .writeback_interface_buffer_size_kbytes = 90,
+ .writeback_line_buffer_buffer_size = 656640,
+ .max_line_buffer_lines = 12,
+ .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
+ .writeback_chroma_buffer_size_kbytes = 8,
+ .writeback_chroma_line_buffer_width_pixels = 4,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .writeback_line_buffer_luma_buffer_size = 0,
+ .writeback_line_buffer_chroma_buffer_size = 14643,
+ .cursor_buffer_size = 8,
+ .cursor_chunk_size = 2,
+ .max_num_otg = 4,
+ .max_num_dpp = 4,
+ .max_num_wb = 1,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .hscl_mults = 4,
+ .vscl_mults = 4,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dispclk_ramp_margin_percent = 1,
+ .underscan_factor = 1.11,
+ .min_vblank_lines = 32,
+ .dppclk_delay_subtotal = 46,
+ .dynamic_metadata_vm_enabled = true,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_cnvc_formatter = 27,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 119,
+ .dcfclk_cstate_latency = 5.2, // SRExitTime
+ .max_inter_dcn_tile_repeaters = 8,
+ .max_num_hdmi_frl_outputs = 0,
+ .odm_combine_4to1_supported = true,
+
+ .xfc_supported = false,
+ .xfc_fill_bw_overhead_percent = 10.0,
+ .xfc_fill_constant_bytes = 0,
+ .gfx7_compat_tiling_supported = 0,
+ .number_of_cursors = 1,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_01_soc = {
+ .clock_limits = {
+ {
+ .state = 0,
+ .dram_speed_mts = 2400.0,
+ .fabricclk_mhz = 600,
+ .socclk_mhz = 278.0,
+ .dcfclk_mhz = 400.0,
+ .dscclk_mhz = 206.0,
+ .dppclk_mhz = 1015.0,
+ .dispclk_mhz = 1015.0,
+ .phyclk_mhz = 600.0,
+ },
+
+ {
+ .state = 1,
+ .dram_speed_mts = 2400.0,
+ .fabricclk_mhz = 688,
+ .socclk_mhz = 278.0,
+ .dcfclk_mhz = 400.0,
+ .dscclk_mhz = 206.0,
+ .dppclk_mhz = 1015.0,
+ .dispclk_mhz = 1015.0,
+ .phyclk_mhz = 600.0,
+ },
+
+ {
+ .state = 2,
+ .dram_speed_mts = 4267.0,
+ .fabricclk_mhz = 1067,
+ .socclk_mhz = 278.0,
+ .dcfclk_mhz = 608.0,
+ .dscclk_mhz = 296.0,
+ .dppclk_mhz = 1015.0,
+ .dispclk_mhz = 1015.0,
+ .phyclk_mhz = 810.0,
+ },
+
+ {
+ .state = 3,
+ .dram_speed_mts = 4267.0,
+ .fabricclk_mhz = 1067,
+ .socclk_mhz = 715.0,
+ .dcfclk_mhz = 676.0,
+ .dscclk_mhz = 338.0,
+ .dppclk_mhz = 1015.0,
+ .dispclk_mhz = 1015.0,
+ .phyclk_mhz = 810.0,
+ },
+
+ {
+ .state = 4,
+ .dram_speed_mts = 4267.0,
+ .fabricclk_mhz = 1067,
+ .socclk_mhz = 953.0,
+ .dcfclk_mhz = 810.0,
+ .dscclk_mhz = 338.0,
+ .dppclk_mhz = 1015.0,
+ .dispclk_mhz = 1015.0,
+ .phyclk_mhz = 810.0,
+ },
+ },
+
+ .sr_exit_time_us = 9.0,
+ .sr_enter_plus_exit_time_us = 11.0,
+ .urgent_latency_us = 4.0,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 75.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 60.0,
+ .writeback_latency_us = 12.0,
+ .max_request_size_bytes = 256,
+ .dram_channel_width_bytes = 4,
+ .fabric_datapath_to_dcn_data_return_bytes = 32,
+ .dcn_downspread_percent = 0.5,
+ .downspread_percent = 0.38,
+ .dram_page_open_time_ns = 50.0,
+ .dram_rw_turnaround_time_ns = 17.5,
+ .dram_return_buffer_per_channel_bytes = 8192,
+ .round_trip_ping_latency_dcfclk_cycles = 191,
+ .urgent_out_of_order_return_per_channel_bytes = 4096,
+ .channel_interleave_bytes = 256,
+ .num_banks = 8,
+ .num_chans = 4,
+ .gpuvm_min_page_size_bytes = 4096,
+ .hostvm_min_page_size_bytes = 4096,
+ .dram_clock_change_latency_us = 23.84,
+ .writeback_dram_clock_change_latency_us = 23.0,
+ .return_bus_width_bytes = 64,
+ .dispclk_dppclk_vco_speed_mhz = 3550,
+ .xfc_bus_transport_time_us = 20, // ?
+ .xfc_xbuf_latency_tolerance_us = 4, // ?
+ .use_urgent_burst_bw = 1, // ?
+ .num_states = 5,
+ .do_urgent_latency_adjustment = false,
+ .urgent_latency_adjustment_fabric_clock_component_us = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+};
+
+static void calculate_wm_set_for_vlevel(int vlevel,
+ struct wm_range_table_entry *table_entry,
+ struct dcn_watermarks *wm_set,
+ struct display_mode_lib *dml,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt)
+{
+ double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;
+
+ ASSERT(vlevel < dml->soc.num_states);
+ /* only pipe 0 is read for voltage and dcf/soc clocks */
+ pipes[0].clks_cfg.voltage = vlevel;
+ pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
+ pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
+
+ dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
+ dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
+ dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
+
+ wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
+ wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
+ wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
+ wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
+ wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
+ wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
+ wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
+ wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
+ dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
+
+}
+
+void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+{
+ struct dcn301_resource_pool *pool = TO_DCN301_RES_POOL(dc->res_pool);
+ struct clk_limit_table *clk_table = &bw_params->clk_table;
+ struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
+ unsigned int i, closest_clk_lvl;
+ int j;
+
+ dc_assert_fp_enabled();
+
+ /* Default clock levels are used for diags, which may lead to overclocking. */
+ if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
+ dcn3_01_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
+ dcn3_01_ip.max_num_dpp = pool->base.pipe_count;
+ dcn3_01_soc.num_chans = bw_params->num_channels;
+
+ ASSERT(clk_table->num_entries);
+ for (i = 0; i < clk_table->num_entries; i++) {
+ /* loop backwards*/
+ for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) {
+ if ((unsigned int) dcn3_01_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
+ }
+ }
+
+ clock_limits[i].state = i;
+ clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+
+ clock_limits[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+ clock_limits[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+ clock_limits[i].dram_bw_per_chan_gbps = dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ clock_limits[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ clock_limits[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ clock_limits[i].phyclk_d18_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ clock_limits[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ }
+
+ for (i = 0; i < clk_table->num_entries; i++)
+ dcn3_01_soc.clock_limits[i] = clock_limits[i];
+
+ if (clk_table->num_entries) {
+ dcn3_01_soc.num_states = clk_table->num_entries;
+ /* duplicate last level */
+ dcn3_01_soc.clock_limits[dcn3_01_soc.num_states] = dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1];
+ dcn3_01_soc.clock_limits[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states;
+ }
+ }
+
+ dcn3_01_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+
+ dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
+}
+
+void dcn301_fpu_set_wm_ranges(int i,
+ struct pp_smu_wm_range_sets *ranges,
+ struct _vcs_dpi_soc_bounding_box_st *loaded_bb)
+{
+ dc_assert_fp_enabled();
+
+ ranges->reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
+ ranges->reader_wm_sets[i].max_fill_clk_mhz = loaded_bb->clock_limits[i].dram_speed_mts / 16;
+}
+
+void dcn301_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info)
+{
+ dc_assert_fp_enabled();
+
+ if (bb_info.dram_clock_change_latency_100ns > 0)
+ dcn3_01_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10;
+
+ if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
+ dcn3_01_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10;
+
+ if (bb_info.dram_sr_exit_latency_100ns > 0)
+ dcn3_01_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
+}
+
+void dcn301_calculate_wm_and_dlg(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel_req)
+{
+ int i, pipe_idx;
+ int vlevel, vlevel_max;
+ struct wm_range_table_entry *table_entry;
+ struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;
+
+ ASSERT(bw_params);
+ dc_assert_fp_enabled();
+
+ vlevel_max = bw_params->clk_table.num_entries - 1;
+
+ /* WM Set D */
+ table_entry = &bw_params->wm_table.entries[WM_D];
+ if (table_entry->wm_type == WM_TYPE_RETRAINING)
+ vlevel = 0;
+ else
+ vlevel = vlevel_max;
+ calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
+ &context->bw_ctx.dml, pipes, pipe_cnt);
+ /* WM Set C */
+ table_entry = &bw_params->wm_table.entries[WM_C];
+ vlevel = min(max(vlevel_req, 2), vlevel_max);
+ calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
+ &context->bw_ctx.dml, pipes, pipe_cnt);
+ /* WM Set B */
+ table_entry = &bw_params->wm_table.entries[WM_B];
+ vlevel = min(max(vlevel_req, 1), vlevel_max);
+ calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
+ &context->bw_ctx.dml, pipes, pipe_cnt);
+
+ /* WM Set A */
+ table_entry = &bw_params->wm_table.entries[WM_A];
+ vlevel = min(vlevel_req, vlevel_max);
+ calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
+ &context->bw_ctx.dml, pipes, pipe_cnt);
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
+ if (dc->config.forced_clocks) {
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
+ }
+ if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
+ if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
+ pipe_idx++;
+ }
+
+ dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+}
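A short worked example of the arithmetic in dcn301_fpu_set_wm_ranges() above, since the /16 scaling is easy to misread; the DRAM speeds used are illustrative, not necessarily the runtime clk_table values.

/*
 * dcn301_fpu_set_wm_ranges() arithmetic, with illustrative DRAM speeds:
 *
 *   clock_limits[0].dram_speed_mts = 2400 -> max_fill_clk_mhz = 2400 / 16 = 150
 *   clock_limits[1].dram_speed_mts = 4267 -> max_fill_clk_mhz = 4267 / 16 = 266
 *                                            min_fill_clk_mhz = 2400 / 16 + 1 = 151
 *
 *   i == 0 always gets min_fill_clk_mhz = 0, so the first reader
 *   watermark set covers everything up to its own DRAM-derived ceiling.
 */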
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h
new file mode 100644
index 000000000000..fc7065d17842
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN301_FPU_H__
+#define __DCN301_FPU_H__
+
+void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
+
+void dcn301_fpu_set_wm_ranges(int i,
+ struct pp_smu_wm_range_sets *ranges,
+ struct _vcs_dpi_soc_bounding_box_st *loaded_bb);
+
+void dcn301_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info);
+
+void dcn301_calculate_wm_and_dlg(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel_req);
+#endif /* __DCN301_FPU_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index ce55c9caf9a2..7e937bdcea00 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -3892,15 +3892,11 @@ static double TruncToValidBPP(
MinDSCBPP = 8;
MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16;
} else {
- if (Output == dm_hdmi) {
- NonDSCBPP0 = 24;
- NonDSCBPP1 = 24;
- NonDSCBPP2 = 24;
- } else {
- NonDSCBPP0 = 16;
- NonDSCBPP1 = 20;
- NonDSCBPP2 = 24;
- }
+
+ NonDSCBPP0 = 16;
+ NonDSCBPP1 = 20;
+ NonDSCBPP2 = 24;
+
if (Format == dm_n422) {
MinDSCBPP = 7;
MaxDSCBPP = 2 * DSCInputBitPerComponent - 1.0 / 16.0;
@@ -5398,9 +5394,9 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->MaximumReadBandwidthWithPrefetch =
v->MaximumReadBandwidthWithPrefetch
- + dml_max4(
- v->VActivePixelBandwidth[i][j][k],
- v->VActiveCursorBandwidth[i][j][k]
+ + dml_max3(
+ v->VActivePixelBandwidth[i][j][k]
+ + v->VActiveCursorBandwidth[i][j][k]
+ v->NoOfDPP[i][j][k]
* (v->meta_row_bandwidth[i][j][k]
+ v->dpte_row_bandwidth[i][j][k]),
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h b/drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h
index e5fac9f4181d..e5fac9f4181d 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c
new file mode 100644
index 000000000000..3ee858f311d1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "rc_calc_fpu.h"
+
+#include "qp_tables.h"
+#include "amdgpu_dm/dc_fpu.h"
+
+#define table_hash(mode, bpc, max_min) ((mode << 16) | (bpc << 8) | max_min)
+
+#define MODE_SELECT(val444, val422, val420) \
+ (cm == CM_444 || cm == CM_RGB) ? (val444) : (cm == CM_422 ? (val422) : (val420))
+
+
+#define TABLE_CASE(mode, bpc, max) case (table_hash(mode, BPC_##bpc, max)): \
+ table = qp_table_##mode##_##bpc##bpc_##max; \
+ table_size = sizeof(qp_table_##mode##_##bpc##bpc_##max)/sizeof(*qp_table_##mode##_##bpc##bpc_##max); \
+ break
+
+static int median3(int a, int b, int c)
+{
+ if (a > b)
+ swap(a, b);
+ if (b > c)
+ swap(b, c);
+ if (a > b)
+ swap(b, c);
+
+ return b;
+}
+
+static double dsc_roundf(double num)
+{
+ if (num < 0.0)
+ num = num - 0.5;
+ else
+ num = num + 0.5;
+
+ return (int)(num);
+}
+
+static double dsc_ceil(double num)
+{
+ double retval = (int)num;
+
+ if (retval != num && num > 0)
+ retval = num + 1;
+
+ return (int)retval;
+}
+
+static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc,
+ enum max_min max_min, float bpp)
+{
+ int mode = MODE_SELECT(444, 422, 420);
+ int sel = table_hash(mode, bpc, max_min);
+ int table_size = 0;
+ int index;
+ const struct qp_entry *table = 0L;
+
+ // alias enum
+ enum { min = DAL_MM_MIN, max = DAL_MM_MAX };
+ switch (sel) {
+ TABLE_CASE(444, 8, max);
+ TABLE_CASE(444, 8, min);
+ TABLE_CASE(444, 10, max);
+ TABLE_CASE(444, 10, min);
+ TABLE_CASE(444, 12, max);
+ TABLE_CASE(444, 12, min);
+ TABLE_CASE(422, 8, max);
+ TABLE_CASE(422, 8, min);
+ TABLE_CASE(422, 10, max);
+ TABLE_CASE(422, 10, min);
+ TABLE_CASE(422, 12, max);
+ TABLE_CASE(422, 12, min);
+ TABLE_CASE(420, 8, max);
+ TABLE_CASE(420, 8, min);
+ TABLE_CASE(420, 10, max);
+ TABLE_CASE(420, 10, min);
+ TABLE_CASE(420, 12, max);
+ TABLE_CASE(420, 12, min);
+ }
+
+ if (table == 0)
+ return;
+
+ index = (bpp - table[0].bpp) * 2;
+
+ /* requested size is bigger than the table */
+ if (index >= table_size) {
+ dm_error("ERROR: Requested rc_calc to find a bpp entry that exceeds the table size\n");
+ return;
+ }
+
+ memcpy(qps, table[index].qps, sizeof(qp_set));
+}
+
+static void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp)
+{
+ int *p = ofs;
+
+ if (mode == CM_444 || mode == CM_RGB) {
+ *p++ = (bpp <= 6) ? (0) : ((((bpp >= 8) && (bpp <= 12))) ? (2) : ((bpp >= 15) ? (10) : ((((bpp > 6) && (bpp < 8))) ? (0 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (2 + dsc_roundf((bpp - 12) * (8 / 3.0))))));
+ *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (8) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (8 / 3.0))))));
+ *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (6) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (6 / 3.0))))));
+ *p++ = (bpp <= 6) ? (-4) : ((((bpp >= 8) && (bpp <= 12))) ? (-2) : ((bpp >= 15) ? (4) : ((((bpp > 6) && (bpp < 8))) ? (-4 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-2 + dsc_roundf((bpp - 12) * (6 / 3.0))))));
+ *p++ = (bpp <= 6) ? (-6) : ((((bpp >= 8) && (bpp <= 12))) ? (-4) : ((bpp >= 15) ? (2) : ((((bpp > 6) && (bpp < 8))) ? (-6 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-4 + dsc_roundf((bpp - 12) * (6 / 3.0))))));
+ *p++ = (bpp <= 12) ? (-6) : ((bpp >= 15) ? (0) : (-6 + dsc_roundf((bpp - 12) * (6 / 3.0))));
+ *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-2) : (-8 + dsc_roundf((bpp - 12) * (6 / 3.0))));
+ *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-4) : (-8 + dsc_roundf((bpp - 12) * (4 / 3.0))));
+ *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-6) : (-8 + dsc_roundf((bpp - 12) * (2 / 3.0))));
+ *p++ = (bpp <= 12) ? (-10) : ((bpp >= 15) ? (-8) : (-10 + dsc_roundf((bpp - 12) * (2 / 3.0))));
+ *p++ = -10;
+ *p++ = (bpp <= 6) ? (-12) : ((bpp >= 8) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2 / 2.0))));
+ *p++ = -12;
+ *p++ = -12;
+ *p++ = -12;
+ } else if (mode == CM_422) {
+ *p++ = (bpp <= 8) ? (2) : ((bpp >= 10) ? (10) : (2 + dsc_roundf((bpp - 8) * (8 / 2.0))));
+ *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (8) : (0 + dsc_roundf((bpp - 8) * (8 / 2.0))));
+ *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (6) : (0 + dsc_roundf((bpp - 8) * (6 / 2.0))));
+ *p++ = (bpp <= 8) ? (-2) : ((bpp >= 10) ? (4) : (-2 + dsc_roundf((bpp - 8) * (6 / 2.0))));
+ *p++ = (bpp <= 8) ? (-4) : ((bpp >= 10) ? (2) : (-4 + dsc_roundf((bpp - 8) * (6 / 2.0))));
+ *p++ = (bpp <= 8) ? (-6) : ((bpp >= 10) ? (0) : (-6 + dsc_roundf((bpp - 8) * (6 / 2.0))));
+ *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-2) : (-8 + dsc_roundf((bpp - 8) * (6 / 2.0))));
+ *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-4) : (-8 + dsc_roundf((bpp - 8) * (4 / 2.0))));
+ *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-6) : (-8 + dsc_roundf((bpp - 8) * (2 / 2.0))));
+ *p++ = (bpp <= 8) ? (-10) : ((bpp >= 10) ? (-8) : (-10 + dsc_roundf((bpp - 8) * (2 / 2.0))));
+ *p++ = -10;
+ *p++ = (bpp <= 6) ? (-12) : ((bpp >= 7) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2.0 / 1))));
+ *p++ = -12;
+ *p++ = -12;
+ *p++ = -12;
+ } else {
+ *p++ = (bpp <= 6) ? (2) : ((bpp >= 8) ? (10) : (2 + dsc_roundf((bpp - 6) * (8 / 2.0))));
+ *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (8) : (0 + dsc_roundf((bpp - 6) * (8 / 2.0))));
+ *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (6) : (0 + dsc_roundf((bpp - 6) * (6 / 2.0))));
+ *p++ = (bpp <= 6) ? (-2) : ((bpp >= 8) ? (4) : (-2 + dsc_roundf((bpp - 6) * (6 / 2.0))));
+ *p++ = (bpp <= 6) ? (-4) : ((bpp >= 8) ? (2) : (-4 + dsc_roundf((bpp - 6) * (6 / 2.0))));
+ *p++ = (bpp <= 6) ? (-6) : ((bpp >= 8) ? (0) : (-6 + dsc_roundf((bpp - 6) * (6 / 2.0))));
+ *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-2) : (-8 + dsc_roundf((bpp - 6) * (6 / 2.0))));
+ *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-4) : (-8 + dsc_roundf((bpp - 6) * (4 / 2.0))));
+ *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-6) : (-8 + dsc_roundf((bpp - 6) * (2 / 2.0))));
+ *p++ = (bpp <= 6) ? (-10) : ((bpp >= 8) ? (-8) : (-10 + dsc_roundf((bpp - 6) * (2 / 2.0))));
+ *p++ = -10;
+ *p++ = (bpp <= 4) ? (-12) : ((bpp >= 5) ? (-10) : (-12 + dsc_roundf((bpp - 4) * (2 / 1.0))));
+ *p++ = -12;
+ *p++ = -12;
+ *p++ = -12;
+ }
+}
+
+void _do_calc_rc_params(struct rc_params *rc,
+ enum colour_mode cm,
+ enum bits_per_comp bpc,
+ u16 drm_bpp,
+ bool is_navite_422_or_420,
+ int slice_width,
+ int slice_height,
+ int minor_version)
+{
+ float bpp;
+ float bpp_group;
+ float initial_xmit_delay_factor;
+ int padding_pixels;
+ int i;
+
+ dc_assert_fp_enabled();
+
+ bpp = ((float)drm_bpp / 16.0);
+ /* in native_422 or native_420 modes, the bits_per_pixel is double the
+ * target bpp (the latter is what calc_rc_params expects)
+ */
+ if (is_navite_422_or_420)
+ bpp /= 2.0;
+
+ rc->rc_quant_incr_limit0 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
+ rc->rc_quant_incr_limit1 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
+
+ bpp_group = MODE_SELECT(bpp, bpp * 2.0, bpp * 2.0);
+
+ switch (cm) {
+ case CM_420:
+ rc->initial_fullness_offset = (bpp >= 6) ? (2048) : ((bpp <= 4) ? (6144) : ((((bpp > 4) && (bpp <= 5))) ? (6144 - dsc_roundf((bpp - 4) * (512))) : (5632 - dsc_roundf((bpp - 5) * (3584)))));
+ rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 3) - (3 * bpp_group)));
+ rc->second_line_bpg_offset = median3(0, 12, (int)((3 * bpc * 3) - (3 * bpp_group)));
+ break;
+ case CM_422:
+ rc->initial_fullness_offset = (bpp >= 8) ? (2048) : ((bpp <= 7) ? (5632) : (5632 - dsc_roundf((bpp - 7) * (3584))));
+ rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 4) - (3 * bpp_group)));
+ rc->second_line_bpg_offset = 0;
+ break;
+ case CM_444:
+ case CM_RGB:
+ rc->initial_fullness_offset = (bpp >= 12) ? (2048) : ((bpp <= 8) ? (6144) : ((((bpp > 8) && (bpp <= 10))) ? (6144 - dsc_roundf((bpp - 8) * (512 / 2))) : (5632 - dsc_roundf((bpp - 10) * (3584 / 2)))));
+ rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)(((3 * bpc + (cm == CM_444 ? 0 : 2)) * 3) - (3 * bpp_group)));
+ rc->second_line_bpg_offset = 0;
+ break;
+ }
+
+ initial_xmit_delay_factor = (cm == CM_444 || cm == CM_RGB) ? 1.0 : 2.0;
+ rc->initial_xmit_delay = dsc_roundf(8192.0/2.0/bpp/initial_xmit_delay_factor);
+
+ if (cm == CM_422 || cm == CM_420)
+ slice_width /= 2;
+
+ padding_pixels = ((slice_width % 3) != 0) ? (3 - (slice_width % 3)) * (rc->initial_xmit_delay / slice_width) : 0;
+ if (3 * bpp_group >= (((rc->initial_xmit_delay + 2) / 3) * (3 + (cm == CM_422)))) {
+ if ((rc->initial_xmit_delay + padding_pixels) % 3 == 1)
+ rc->initial_xmit_delay++;
+ }
+
+ rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
+ rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
+ rc->flatness_det_thresh = 2 << (bpc - 8);
+
+ get_qp_set(rc->qp_min, cm, bpc, DAL_MM_MIN, bpp);
+ get_qp_set(rc->qp_max, cm, bpc, DAL_MM_MAX, bpp);
+ if (cm == CM_444 && minor_version == 1) {
+ for (i = 0; i < QP_SET_SIZE; ++i) {
+ rc->qp_min[i] = rc->qp_min[i] > 0 ? rc->qp_min[i] - 1 : 0;
+ rc->qp_max[i] = rc->qp_max[i] > 0 ? rc->qp_max[i] - 1 : 0;
+ }
+ }
+ get_ofs_set(rc->ofs, cm, bpp);
+
+ /* fixed parameters */
+ rc->rc_model_size = 8192;
+ rc->rc_edge_factor = 6;
+ rc->rc_tgt_offset_hi = 3;
+ rc->rc_tgt_offset_lo = 3;
+
+ rc->rc_buf_thresh[0] = 896;
+ rc->rc_buf_thresh[1] = 1792;
+ rc->rc_buf_thresh[2] = 2688;
+ rc->rc_buf_thresh[3] = 3584;
+ rc->rc_buf_thresh[4] = 4480;
+ rc->rc_buf_thresh[5] = 5376;
+ rc->rc_buf_thresh[6] = 6272;
+ rc->rc_buf_thresh[7] = 6720;
+ rc->rc_buf_thresh[8] = 7168;
+ rc->rc_buf_thresh[9] = 7616;
+ rc->rc_buf_thresh[10] = 7744;
+ rc->rc_buf_thresh[11] = 7872;
+ rc->rc_buf_thresh[12] = 8000;
+ rc->rc_buf_thresh[13] = 8064;
+}
+
+u32 _do_bytes_per_pixel_calc(int slice_width,
+ u16 drm_bpp,
+ bool is_navite_422_or_420)
+{
+ float bpp;
+ u32 bytes_per_pixel;
+ double d_bytes_per_pixel;
+
+ dc_assert_fp_enabled();
+
+ bpp = ((float)drm_bpp / 16.0);
+ d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width;
+ // TODO: Make sure the formula for calculating this is precise (ceiling
+ // vs. floor, and at what point they should be applied)
+ if (is_navite_422_or_420)
+ d_bytes_per_pixel /= 2;
+
+ bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000);
+
+ return bytes_per_pixel;
+}
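_do_bytes_per_pixel_calc() above returns bytes per pixel as a fixed-point value scaled by 0x10000000 (2^28). A worked example with illustrative inputs follows; drm_bpp is in 1/16-bpp units per the bpp = drm_bpp / 16.0 line above.

/*
 * Worked example for _do_bytes_per_pixel_calc(), illustrative inputs only:
 *
 *   drm_bpp = 128, slice_width = 1280
 *   bpp               = 128 / 16.0                        = 8.0
 *   d_bytes_per_pixel = dsc_ceil(8.0 * 1280 / 8.0) / 1280 = 1.0
 *   return value      = dsc_ceil(1.0 * 0x10000000)        = 0x10000000
 *
 * i.e. one byte per pixel in the 2^28 fixed-point scale; how the DSC
 * register programming consumes that scale is not shown in this patch.
 */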
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h
new file mode 100644
index 000000000000..b93b95409fbe
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __RC_CALC_FPU_H__
+#define __RC_CALC_FPU_H__
+
+#include "os_types.h"
+#include <drm/drm_dsc.h>
+
+#define QP_SET_SIZE 15
+
+typedef int qp_set[QP_SET_SIZE];
+
+struct rc_params {
+ int rc_quant_incr_limit0;
+ int rc_quant_incr_limit1;
+ int initial_fullness_offset;
+ int initial_xmit_delay;
+ int first_line_bpg_offset;
+ int second_line_bpg_offset;
+ int flatness_min_qp;
+ int flatness_max_qp;
+ int flatness_det_thresh;
+ qp_set qp_min;
+ qp_set qp_max;
+ qp_set ofs;
+ int rc_model_size;
+ int rc_edge_factor;
+ int rc_tgt_offset_hi;
+ int rc_tgt_offset_lo;
+ int rc_buf_thresh[QP_SET_SIZE - 1];
+};
+
+enum colour_mode {
+ CM_RGB, /* 444 RGB */
+ CM_444, /* 444 YUV or simple 422 */
+ CM_422, /* native 422 */
+ CM_420 /* native 420 */
+};
+
+enum bits_per_comp {
+ BPC_8 = 8,
+ BPC_10 = 10,
+ BPC_12 = 12
+};
+
+enum max_min {
+ DAL_MM_MIN = 0,
+ DAL_MM_MAX = 1
+};
+
+struct qp_entry {
+ float bpp;
+ const qp_set qps;
+};
+
+typedef struct qp_entry qp_table[];
+
+u32 _do_bytes_per_pixel_calc(int slice_width,
+ u16 drm_bpp,
+ bool is_navite_422_or_420);
+
+void _do_calc_rc_params(struct rc_params *rc,
+ enum colour_mode cm,
+ enum bits_per_comp bpc,
+ u16 drm_bpp,
+ bool is_navite_422_or_420,
+ int slice_width,
+ int slice_height,
+ int minor_version);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
index 8d31eb75c6a6..a2537229ee88 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
@@ -1,35 +1,6 @@
# SPDX-License-Identifier: MIT
#
# Makefile for the 'dsc' sub-component of DAL.
-
-ifdef CONFIG_X86
-dsc_ccflags := -mhard-float -msse
-endif
-
-ifdef CONFIG_PPC64
-dsc_ccflags := -mhard-float -maltivec
-endif
-
-ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
-IS_OLD_GCC = 1
-endif
-endif
-
-ifdef CONFIG_X86
-ifdef IS_OLD_GCC
-# Stack alignment mismatch, proceed with caution.
-# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
-# (8B stack alignment).
-dsc_ccflags += -mpreferred-stack-boundary=4
-else
-dsc_ccflags += -msse2
-endif
-endif
-
-CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_ccflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_rcflags)
-
DSC = dc_dsc.o rc_calc.o rc_calc_dpi.o
AMD_DAL_DSC = $(addprefix $(AMDDALPATH)/dc/dsc/,$(DSC))
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
index 7b294f637881..b19d3aeb5962 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
@@ -23,266 +23,7 @@
* Authors: AMD
*
*/
-#include <drm/drm_dsc.h>
-
-#include "os_types.h"
#include "rc_calc.h"
-#include "qp_tables.h"
-
-#define table_hash(mode, bpc, max_min) ((mode << 16) | (bpc << 8) | max_min)
-
-#define MODE_SELECT(val444, val422, val420) \
- (cm == CM_444 || cm == CM_RGB) ? (val444) : (cm == CM_422 ? (val422) : (val420))
-
-
-#define TABLE_CASE(mode, bpc, max) case (table_hash(mode, BPC_##bpc, max)): \
- table = qp_table_##mode##_##bpc##bpc_##max; \
- table_size = sizeof(qp_table_##mode##_##bpc##bpc_##max)/sizeof(*qp_table_##mode##_##bpc##bpc_##max); \
- break
-
-
-static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc,
- enum max_min max_min, float bpp)
-{
- int mode = MODE_SELECT(444, 422, 420);
- int sel = table_hash(mode, bpc, max_min);
- int table_size = 0;
- int index;
- const struct qp_entry *table = 0L;
-
- // alias enum
- enum { min = DAL_MM_MIN, max = DAL_MM_MAX };
- switch (sel) {
- TABLE_CASE(444, 8, max);
- TABLE_CASE(444, 8, min);
- TABLE_CASE(444, 10, max);
- TABLE_CASE(444, 10, min);
- TABLE_CASE(444, 12, max);
- TABLE_CASE(444, 12, min);
- TABLE_CASE(422, 8, max);
- TABLE_CASE(422, 8, min);
- TABLE_CASE(422, 10, max);
- TABLE_CASE(422, 10, min);
- TABLE_CASE(422, 12, max);
- TABLE_CASE(422, 12, min);
- TABLE_CASE(420, 8, max);
- TABLE_CASE(420, 8, min);
- TABLE_CASE(420, 10, max);
- TABLE_CASE(420, 10, min);
- TABLE_CASE(420, 12, max);
- TABLE_CASE(420, 12, min);
- }
-
- if (table == 0)
- return;
-
- index = (bpp - table[0].bpp) * 2;
-
- /* requested size is bigger than the table */
- if (index >= table_size) {
- dm_error("ERROR: Requested rc_calc to find a bpp entry that exceeds the table size\n");
- return;
- }
-
- memcpy(qps, table[index].qps, sizeof(qp_set));
-}
-
-static double dsc_roundf(double num)
-{
- if (num < 0.0)
- num = num - 0.5;
- else
- num = num + 0.5;
-
- return (int)(num);
-}
-
-static double dsc_ceil(double num)
-{
- double retval = (int)num;
-
- if (retval != num && num > 0)
- retval = num + 1;
-
- return (int)retval;
-}
-
-static void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp)
-{
- int *p = ofs;
-
- if (mode == CM_444 || mode == CM_RGB) {
- *p++ = (bpp <= 6) ? (0) : ((((bpp >= 8) && (bpp <= 12))) ? (2) : ((bpp >= 15) ? (10) : ((((bpp > 6) && (bpp < 8))) ? (0 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (2 + dsc_roundf((bpp - 12) * (8 / 3.0))))));
- *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (8) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (8 / 3.0))))));
- *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (6) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (6 / 3.0))))));
- *p++ = (bpp <= 6) ? (-4) : ((((bpp >= 8) && (bpp <= 12))) ? (-2) : ((bpp >= 15) ? (4) : ((((bpp > 6) && (bpp < 8))) ? (-4 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-2 + dsc_roundf((bpp - 12) * (6 / 3.0))))));
- *p++ = (bpp <= 6) ? (-6) : ((((bpp >= 8) && (bpp <= 12))) ? (-4) : ((bpp >= 15) ? (2) : ((((bpp > 6) && (bpp < 8))) ? (-6 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-4 + dsc_roundf((bpp - 12) * (6 / 3.0))))));
- *p++ = (bpp <= 12) ? (-6) : ((bpp >= 15) ? (0) : (-6 + dsc_roundf((bpp - 12) * (6 / 3.0))));
- *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-2) : (-8 + dsc_roundf((bpp - 12) * (6 / 3.0))));
- *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-4) : (-8 + dsc_roundf((bpp - 12) * (4 / 3.0))));
- *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-6) : (-8 + dsc_roundf((bpp - 12) * (2 / 3.0))));
- *p++ = (bpp <= 12) ? (-10) : ((bpp >= 15) ? (-8) : (-10 + dsc_roundf((bpp - 12) * (2 / 3.0))));
- *p++ = -10;
- *p++ = (bpp <= 6) ? (-12) : ((bpp >= 8) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2 / 2.0))));
- *p++ = -12;
- *p++ = -12;
- *p++ = -12;
- } else if (mode == CM_422) {
- *p++ = (bpp <= 8) ? (2) : ((bpp >= 10) ? (10) : (2 + dsc_roundf((bpp - 8) * (8 / 2.0))));
- *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (8) : (0 + dsc_roundf((bpp - 8) * (8 / 2.0))));
- *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (6) : (0 + dsc_roundf((bpp - 8) * (6 / 2.0))));
- *p++ = (bpp <= 8) ? (-2) : ((bpp >= 10) ? (4) : (-2 + dsc_roundf((bpp - 8) * (6 / 2.0))));
- *p++ = (bpp <= 8) ? (-4) : ((bpp >= 10) ? (2) : (-4 + dsc_roundf((bpp - 8) * (6 / 2.0))));
- *p++ = (bpp <= 8) ? (-6) : ((bpp >= 10) ? (0) : (-6 + dsc_roundf((bpp - 8) * (6 / 2.0))));
- *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-2) : (-8 + dsc_roundf((bpp - 8) * (6 / 2.0))));
- *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-4) : (-8 + dsc_roundf((bpp - 8) * (4 / 2.0))));
- *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-6) : (-8 + dsc_roundf((bpp - 8) * (2 / 2.0))));
- *p++ = (bpp <= 8) ? (-10) : ((bpp >= 10) ? (-8) : (-10 + dsc_roundf((bpp - 8) * (2 / 2.0))));
- *p++ = -10;
- *p++ = (bpp <= 6) ? (-12) : ((bpp >= 7) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2.0 / 1))));
- *p++ = -12;
- *p++ = -12;
- *p++ = -12;
- } else {
- *p++ = (bpp <= 6) ? (2) : ((bpp >= 8) ? (10) : (2 + dsc_roundf((bpp - 6) * (8 / 2.0))));
- *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (8) : (0 + dsc_roundf((bpp - 6) * (8 / 2.0))));
- *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (6) : (0 + dsc_roundf((bpp - 6) * (6 / 2.0))));
- *p++ = (bpp <= 6) ? (-2) : ((bpp >= 8) ? (4) : (-2 + dsc_roundf((bpp - 6) * (6 / 2.0))));
- *p++ = (bpp <= 6) ? (-4) : ((bpp >= 8) ? (2) : (-4 + dsc_roundf((bpp - 6) * (6 / 2.0))));
- *p++ = (bpp <= 6) ? (-6) : ((bpp >= 8) ? (0) : (-6 + dsc_roundf((bpp - 6) * (6 / 2.0))));
- *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-2) : (-8 + dsc_roundf((bpp - 6) * (6 / 2.0))));
- *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-4) : (-8 + dsc_roundf((bpp - 6) * (4 / 2.0))));
- *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-6) : (-8 + dsc_roundf((bpp - 6) * (2 / 2.0))));
- *p++ = (bpp <= 6) ? (-10) : ((bpp >= 8) ? (-8) : (-10 + dsc_roundf((bpp - 6) * (2 / 2.0))));
- *p++ = -10;
- *p++ = (bpp <= 4) ? (-12) : ((bpp >= 5) ? (-10) : (-12 + dsc_roundf((bpp - 4) * (2 / 1.0))));
- *p++ = -12;
- *p++ = -12;
- *p++ = -12;
- }
-}
-
-static int median3(int a, int b, int c)
-{
- if (a > b)
- swap(a, b);
- if (b > c)
- swap(b, c);
- if (a > b)
- swap(b, c);
-
- return b;
-}
-
-static void _do_calc_rc_params(struct rc_params *rc, enum colour_mode cm,
- enum bits_per_comp bpc, u16 drm_bpp,
- bool is_navite_422_or_420,
- int slice_width, int slice_height,
- int minor_version)
-{
- float bpp;
- float bpp_group;
- float initial_xmit_delay_factor;
- int padding_pixels;
- int i;
-
- bpp = ((float)drm_bpp / 16.0);
- /* in native_422 or native_420 modes, the bits_per_pixel is double the
- * target bpp (the latter is what calc_rc_params expects)
- */
- if (is_navite_422_or_420)
- bpp /= 2.0;
-
- rc->rc_quant_incr_limit0 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
- rc->rc_quant_incr_limit1 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
-
- bpp_group = MODE_SELECT(bpp, bpp * 2.0, bpp * 2.0);
-
- switch (cm) {
- case CM_420:
- rc->initial_fullness_offset = (bpp >= 6) ? (2048) : ((bpp <= 4) ? (6144) : ((((bpp > 4) && (bpp <= 5))) ? (6144 - dsc_roundf((bpp - 4) * (512))) : (5632 - dsc_roundf((bpp - 5) * (3584)))));
- rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 3) - (3 * bpp_group)));
- rc->second_line_bpg_offset = median3(0, 12, (int)((3 * bpc * 3) - (3 * bpp_group)));
- break;
- case CM_422:
- rc->initial_fullness_offset = (bpp >= 8) ? (2048) : ((bpp <= 7) ? (5632) : (5632 - dsc_roundf((bpp - 7) * (3584))));
- rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 4) - (3 * bpp_group)));
- rc->second_line_bpg_offset = 0;
- break;
- case CM_444:
- case CM_RGB:
- rc->initial_fullness_offset = (bpp >= 12) ? (2048) : ((bpp <= 8) ? (6144) : ((((bpp > 8) && (bpp <= 10))) ? (6144 - dsc_roundf((bpp - 8) * (512 / 2))) : (5632 - dsc_roundf((bpp - 10) * (3584 / 2)))));
- rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)(((3 * bpc + (cm == CM_444 ? 0 : 2)) * 3) - (3 * bpp_group)));
- rc->second_line_bpg_offset = 0;
- break;
- }
-
- initial_xmit_delay_factor = (cm == CM_444 || cm == CM_RGB) ? 1.0 : 2.0;
- rc->initial_xmit_delay = dsc_roundf(8192.0/2.0/bpp/initial_xmit_delay_factor);
-
- if (cm == CM_422 || cm == CM_420)
- slice_width /= 2;
-
- padding_pixels = ((slice_width % 3) != 0) ? (3 - (slice_width % 3)) * (rc->initial_xmit_delay / slice_width) : 0;
- if (3 * bpp_group >= (((rc->initial_xmit_delay + 2) / 3) * (3 + (cm == CM_422)))) {
- if ((rc->initial_xmit_delay + padding_pixels) % 3 == 1)
- rc->initial_xmit_delay++;
- }
-
- rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
- rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
- rc->flatness_det_thresh = 2 << (bpc - 8);
-
- get_qp_set(rc->qp_min, cm, bpc, DAL_MM_MIN, bpp);
- get_qp_set(rc->qp_max, cm, bpc, DAL_MM_MAX, bpp);
- if (cm == CM_444 && minor_version == 1) {
- for (i = 0; i < QP_SET_SIZE; ++i) {
- rc->qp_min[i] = rc->qp_min[i] > 0 ? rc->qp_min[i] - 1 : 0;
- rc->qp_max[i] = rc->qp_max[i] > 0 ? rc->qp_max[i] - 1 : 0;
- }
- }
- get_ofs_set(rc->ofs, cm, bpp);
-
- /* fixed parameters */
- rc->rc_model_size = 8192;
- rc->rc_edge_factor = 6;
- rc->rc_tgt_offset_hi = 3;
- rc->rc_tgt_offset_lo = 3;
-
- rc->rc_buf_thresh[0] = 896;
- rc->rc_buf_thresh[1] = 1792;
- rc->rc_buf_thresh[2] = 2688;
- rc->rc_buf_thresh[3] = 3584;
- rc->rc_buf_thresh[4] = 4480;
- rc->rc_buf_thresh[5] = 5376;
- rc->rc_buf_thresh[6] = 6272;
- rc->rc_buf_thresh[7] = 6720;
- rc->rc_buf_thresh[8] = 7168;
- rc->rc_buf_thresh[9] = 7616;
- rc->rc_buf_thresh[10] = 7744;
- rc->rc_buf_thresh[11] = 7872;
- rc->rc_buf_thresh[12] = 8000;
- rc->rc_buf_thresh[13] = 8064;
-}
-
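
/*
 * Sketch of the fixed-point handling in _do_calc_rc_params() above for the
 * 4:4:4/RGB path: drm_bpp is in 1/16-bpp units (U6.4), and the base initial
 * transmission delay is rc_model_size / 2 / bpp, rounded (before the
 * slice-width padding adjustment that follows in the function). The helper
 * name and the main() harness are illustrative only.
 */
#include <assert.h>
#include <math.h>

static int initial_xmit_delay_444(unsigned int drm_bpp)
{
	float bpp = (float)drm_bpp / 16.0f;		/* U6.4 -> target bits per pixel */

	return (int)roundf(8192.0f / 2.0f / bpp);	/* 8192 == rc_model_size */
}

int main(void)
{
	assert(initial_xmit_delay_444(128) == 512);	/* 8.0 bpp */
	assert(initial_xmit_delay_444(192) == 341);	/* 12.0 bpp */
	return 0;
}
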
-static u32 _do_bytes_per_pixel_calc(int slice_width, u16 drm_bpp,
- bool is_navite_422_or_420)
-{
- float bpp;
- u32 bytes_per_pixel;
- double d_bytes_per_pixel;
-
- bpp = ((float)drm_bpp / 16.0);
- d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width;
- // TODO: Make sure the formula for calculating this is precise (ceiling
- // vs. floor, and at what point they should be applied)
- if (is_navite_422_or_420)
- d_bytes_per_pixel /= 2;
-
- bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000);
-
- return bytes_per_pixel;
-}
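
/*
 * Integer-only illustration of the scaling in _do_bytes_per_pixel_calc()
 * above for the non-native-4:2:2/4:2:0 path: bytes per slice line are
 * rounded up, converted back to bytes per pixel, and scaled by 2^28
 * (0x10000000) with a final round-up. The helper names and the main()
 * harness are illustrative, not a drop-in replacement for the float code.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t ceil_div(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

static uint32_t dsc_bytes_per_pixel_fixed(uint16_t drm_bpp, uint32_t slice_width)
{
	/* ceil(bpp * slice_width / 8) with bpp = drm_bpp / 16 */
	uint64_t bytes_per_line = ceil_div((uint64_t)drm_bpp * slice_width, 16 * 8);

	/* bytes per pixel, scaled by 2^28 and rounded up */
	return (uint32_t)ceil_div(bytes_per_line << 28, slice_width);
}

int main(void)
{
	/* 8.0 bpp over a 1920-pixel slice is exactly 1 byte/pixel -> 1 << 28 */
	assert(dsc_bytes_per_pixel_fixed(128, 1920) == (1u << 28));
	return 0;
}
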
/**
* calc_rc_params - reads the user's cmdline mode
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
index 262f06afcbf9..c2340e001b57 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
@@ -27,55 +27,7 @@
#ifndef __RC_CALC_H__
#define __RC_CALC_H__
-
-#define QP_SET_SIZE 15
-
-typedef int qp_set[QP_SET_SIZE];
-
-struct rc_params {
- int rc_quant_incr_limit0;
- int rc_quant_incr_limit1;
- int initial_fullness_offset;
- int initial_xmit_delay;
- int first_line_bpg_offset;
- int second_line_bpg_offset;
- int flatness_min_qp;
- int flatness_max_qp;
- int flatness_det_thresh;
- qp_set qp_min;
- qp_set qp_max;
- qp_set ofs;
- int rc_model_size;
- int rc_edge_factor;
- int rc_tgt_offset_hi;
- int rc_tgt_offset_lo;
- int rc_buf_thresh[QP_SET_SIZE - 1];
-};
-
-enum colour_mode {
- CM_RGB, /* 444 RGB */
- CM_444, /* 444 YUV or simple 422 */
- CM_422, /* native 422 */
- CM_420 /* native 420 */
-};
-
-enum bits_per_comp {
- BPC_8 = 8,
- BPC_10 = 10,
- BPC_12 = 12
-};
-
-enum max_min {
- DAL_MM_MIN = 0,
- DAL_MM_MAX = 1
-};
-
-struct qp_entry {
- float bpp;
- const qp_set qps;
-};
-
-typedef struct qp_entry qp_table[];
+#include "dml/dsc/rc_calc_fpu.h"
void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps);
u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps);
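
The tables this header now pulls in from dml/dsc/rc_calc_fpu.h keep the DSC rate-control shape: each qp_set has QP_SET_SIZE (15) entries, one per rate-control range, and rc_buf_thresh carries the QP_SET_SIZE - 1 buffer-fullness thresholds that separate those ranges. A standalone sketch of that relationship, using the threshold values programmed in _do_calc_rc_params() above (the lookup function, the strict '<' comparison and the main() harness are illustrative):

#include <assert.h>

#define QP_SET_SIZE 15

static int rc_range_index(const int thresh[QP_SET_SIZE - 1], int buf_fullness)
{
	int i;

	for (i = 0; i < QP_SET_SIZE - 1; i++)
		if (buf_fullness < thresh[i])
			return i;
	return QP_SET_SIZE - 1;	/* above every threshold: last range */
}

int main(void)
{
	/* the fixed thresholds set in _do_calc_rc_params() */
	const int thresh[QP_SET_SIZE - 1] = {
		896, 1792, 2688, 3584, 4480, 5376, 6272,
		6720, 7168, 7616, 7744, 7872, 8000, 8064,
	};

	assert(rc_range_index(thresh, 0) == 0);
	assert(rc_range_index(thresh, 4000) == 4);
	assert(rc_range_index(thresh, 8100) == 14);
	return 0;
}
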
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
index ef830aded5b1..1e19dd674e5a 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
@@ -22,7 +22,6 @@
* Authors: AMD
*
*/
-#include "os_types.h"
#include <drm/drm_dsc.h>
#include "dscc_types.h"
#include "rc_calc.h"
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index c70375117519..a6d3d859754a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -121,12 +121,12 @@ enum dc_status dpcd_set_lane_settings(
const struct link_training_settings *link_training_setting,
uint32_t offset);
/* Read training status and adjustment requests from DPCD. */
-enum dc_status dp_get_lane_status_and_drive_settings(
+enum dc_status dp_get_lane_status_and_lane_adjust(
struct dc_link *link,
const struct link_training_settings *link_training_setting,
- union lane_status *ln_status,
- union lane_align_status_updated *ln_status_updated,
- struct link_training_settings *req_settings,
+ union lane_status ln_status[LANE_COUNT_DP_MAX],
+ union lane_align_status_updated *ln_align,
+ union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
uint32_t offset);
void dp_wait_for_training_aux_rd_interval(
@@ -151,9 +151,11 @@ void dp_hw_to_dpcd_lane_settings(
const struct link_training_settings *lt_settings,
const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]);
-void dp_update_drive_settings(
- struct link_training_settings *dest,
- struct link_training_settings src);
+void dp_decide_lane_settings(
+ const struct link_training_settings *lt_settings,
+ const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
+ struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
+ union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]);
uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval);
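
The renamed DPCD helpers separate reading the sink's per-lane status and requested adjustments from deciding the next drive settings. A rough fragment showing the intended calling order (the local array names are placeholders, the enclosing training loop and error handling are omitted, and this is a sketch of the sequence rather than the exact dc_link_dp.c code):

	union lane_status ln_status[LANE_COUNT_DP_MAX] = {0};
	union lane_align_status_updated ln_align = {0};
	union lane_adjust ln_adjust[LANE_COUNT_DP_MAX] = {0};
	struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX] = {0};
	union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX] = {0};

	/* read per-lane status plus the sink's requested swing/pre-emphasis */
	dp_get_lane_status_and_lane_adjust(link, lt_settings,
			ln_status, &ln_align, ln_adjust, offset);

	/* turn those requests into the next hw and DPCD lane settings */
	dp_decide_lane_settings(lt_settings, ln_adjust,
			hw_lane_settings, dpcd_lane_settings);
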
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index 09237d5819f4..c940fdfda144 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -123,6 +123,15 @@ struct dccg_funcs {
void (*set_dispclk_change_mode)(
struct dccg *dccg,
enum dentist_dispclk_change_mode change_mode);
+
+ void (*disable_dsc)(
+ struct dccg *dccg,
+ int inst);
+
+ void (*enable_dsc)(
+ struct dccg *dccg,
+ int inst);
+
};
#endif //__DAL_DCCG_H__
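
The new enable_dsc/disable_dsc entries follow DC's usual optional-callback convention: a generation-specific dccg implementation populates them in its funcs table and callers NULL-check before invoking, so ASICs that leave them unset are unaffected. A cut-down standalone sketch of that convention (the *_sketch types mirror the shape only and are not the real driver structures):

#include <stddef.h>

struct dccg_sketch;

struct dccg_funcs_sketch {
	void (*enable_dsc)(struct dccg_sketch *dccg, int inst);
	void (*disable_dsc)(struct dccg_sketch *dccg, int inst);
};

struct dccg_sketch {
	const struct dccg_funcs_sketch *funcs;
};

static void dsc_clock_off(struct dccg_sketch *dccg, int inst)
{
	/* hooks are optional: implementations that do not need them leave NULL */
	if (dccg->funcs && dccg->funcs->disable_dsc)
		dccg->funcs->disable_dsc(dccg, inst);
}

int main(void)
{
	struct dccg_sketch dccg = { .funcs = NULL };

	dsc_clock_off(&dccg, 0);	/* safe no-op when unimplemented */
	return 0;
}
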
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 9f12792b7e59..3ef7faa92052 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -34,6 +34,8 @@ union defer_reg_writes {
bool disable_blnd_lut:1;
bool disable_3dlut:1;
bool disable_shaper:1;
+ bool disable_gamcor:1;
+ bool disable_dscl:1;
} bits;
uint32_t raw;
};
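
The two added bits extend the defer_reg_writes mask, which uses the familiar bits/raw union layout: individual blocks are flagged by name through the bitfield while the whole mask can be tested or cleared through raw. A cut-down standalone sketch of that layout (the union below mirrors the shape only and is not the driver's definition):

#include <assert.h>
#include <stdint.h>

union defer_reg_writes_sketch {
	struct {
		uint32_t disable_blnd_lut:1;
		uint32_t disable_3dlut:1;
		uint32_t disable_shaper:1;
		uint32_t disable_gamcor:1;
		uint32_t disable_dscl:1;
	} bits;
	uint32_t raw;
};

int main(void)
{
	union defer_reg_writes_sketch defer = { .raw = 0 };

	defer.bits.disable_gamcor = 1;	/* flag the gamma-correction block */
	defer.bits.disable_dscl = 1;	/* flag the scaler block */
	assert(defer.raw != 0);		/* at least one block is flagged */

	defer.raw = 0;			/* clear the whole mask at once */
	assert(!defer.bits.disable_dscl);
	return 0;
}
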
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 04d6ec3f021f..f5fd2a067323 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -367,6 +367,7 @@ struct mpc_funcs {
void (*set_bg_color)(struct mpc *mpc,
struct tg_color *bg_color,
int mpcc_id);
+ void (*set_mpc_mem_lp_mode)(struct mpc *mpc);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index f324285394be..c2008258c50a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -143,6 +143,7 @@ struct hwseq_private_funcs {
const struct dc_plane_state *plane_state);
void (*PLAT_58856_wa)(struct dc_state *context,
struct pipe_ctx *pipe_ctx);
+ void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable);
};
struct dce_hwseq {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
index 83b2199b2c83..10dcf6a5e9b1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
@@ -97,7 +97,7 @@ struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream(
const struct dc_stream_state *stream);
/* Return true if encoder available to use. */
-bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id);
+bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id, struct dc_link *link);
/* Returns true if encoder assignments in supplied state pass validity checks. */
bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state);