Diffstat (limited to 'drivers/gpu/drm/amd/display')
-rw-r--r--  drivers/gpu/drm/amd/display/Kconfig | 8
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 100
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 43
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 30
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 204
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c | 40
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h | 60
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c | 543
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.h (renamed from drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h) | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c | 255
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.h | 108
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 137
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 59
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 43
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 42
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 163
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 40
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 37
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 65
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_bios_types.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 75
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_link.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_abm.h | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_audio.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 153
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h | 42
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 46
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c | 92
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c (renamed from drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h) | 78
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h | 39
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 28
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 37
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 62
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h | 42
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h | 41
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 134
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 93
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c | 121
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c | 89
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/Makefile | 54
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c | 206
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h | 230
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c | 640
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.h | 78
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.c | 100
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h | 66
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c | 205
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h | 76
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c | 851
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h | 269
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c | 1414
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h | 608
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c | 410
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c | 264
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h | 923
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c | 354
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c | 417
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h | 119
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c | 532
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h | 292
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 719
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h | 70
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c | 141
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c | 239
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h | 463
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c | 1409
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h | 665
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_opp.h | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c | 365
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h | 333
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 2691
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h | 82
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c | 194
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h | 133
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_cp_psp.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_helpers.h | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_services.h | 69
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/Makefile | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c | 36
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 6865
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.h | 43
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c | 1868
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h | 69
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c | 181
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c | 67
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h | 230
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/Makefile | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c | 257
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.h | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.c | 387
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.h | 35
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_status.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 31
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 75
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h | 14
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h | 83
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/mcif_wb.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h | 108
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/resource.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/Makefile | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c | 384
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.h | 37
-rw-r--r--  drivers/gpu/drm/amd/display/dc/os_types.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 27
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 500
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h | 75
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h | 152
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c | 49
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c | 184
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h (renamed from drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_meta.h) | 51
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 91
-rw-r--r--  drivers/gpu/drm/amd/display/include/dal_asic_id.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/include/dal_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 115
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/color_gamma.h | 18
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/color_table.c | 48
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/color_table.h (renamed from drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h) | 43
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_stats.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 96
186 files changed, 30607 insertions, 1355 deletions
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 1911a34cc060..34ae4f3a32f4 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -17,6 +17,14 @@ config DRM_AMD_DC_DCN
help
Raven, Navi and Renoir family support for display engine
+config DRM_AMD_DC_DCN3_0
+ bool "DCN 3.0 family"
+ depends on DRM_AMD_DC && X86
+ depends on DRM_AMD_DC_DCN
+ help
+ Choose this option if you want to have
+ sienna_cichlid support for display engine
+
config DRM_AMD_DC_HDCP
bool "Enable HDCP support in DC"
depends on DRM_AMD_DC
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 7ced9f87be97..9b1f5244ec87 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -94,6 +94,10 @@
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
+#endif
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
@@ -696,7 +700,7 @@ static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
adev->mode_info.audio.enabled = false;
}
-void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
+static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
struct drm_audio_component *acomp = adev->dm.audio_component;
@@ -1070,6 +1074,9 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_RENOIR:
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case CHIP_SIENNA_CICHLID:
+#endif
return 0;
case CHIP_NAVI12:
fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
@@ -1166,6 +1173,12 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
dmub_asic = DMUB_ASIC_DCN21;
fw_name_dmub = FIRMWARE_RENOIR_DMUB;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case CHIP_SIENNA_CICHLID:
+ dmub_asic = DMUB_ASIC_DCN30;
+ fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
+ break;
+#endif
default:
/* ASIC doesn't support DMUB. */
@@ -1228,10 +1241,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
region_params.vbios_size = adev->bios_size;
- region_params.fw_bss_data =
+ region_params.fw_bss_data = region_params.bss_data_size ?
adev->dm.dmub_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
- le32_to_cpu(hdr->inst_const_bytes);
+ le32_to_cpu(hdr->inst_const_bytes) : NULL;
region_params.fw_inst_const =
adev->dm.dmub_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
@@ -1305,15 +1318,11 @@ static int dm_sw_fini(void *handle)
adev->dm.dmub_srv = NULL;
}
- if (adev->dm.dmub_fw) {
- release_firmware(adev->dm.dmub_fw);
- adev->dm.dmub_fw = NULL;
- }
+ release_firmware(adev->dm.dmub_fw);
+ adev->dm.dmub_fw = NULL;
- if(adev->dm.fw_dmcu) {
- release_firmware(adev->dm.fw_dmcu);
- adev->dm.fw_dmcu = NULL;
- }
+ release_firmware(adev->dm.fw_dmcu);
+ adev->dm.fw_dmcu = NULL;
return 0;
}
@@ -1573,7 +1582,7 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
}
-enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
+static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
struct dc_state *context = NULL;
enum dc_status res = DC_ERROR_UNEXPECTED;
@@ -2693,7 +2702,7 @@ static int dm_atomic_get_state(struct drm_atomic_state *state,
return 0;
}
-struct dm_atomic_state *
+static struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
@@ -2711,7 +2720,7 @@ dm_atomic_get_new_state(struct drm_atomic_state *state)
return NULL;
}
-struct dm_atomic_state *
+static struct dm_atomic_state *
dm_atomic_get_old_state(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
@@ -3205,6 +3214,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_RENOIR:
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case CHIP_SIENNA_CICHLID:
+#endif
if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
@@ -3359,6 +3371,9 @@ static int dm_early_init(void *handle)
#endif
case CHIP_NAVI10:
case CHIP_NAVI12:
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case CHIP_SIENNA_CICHLID:
+#endif
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
@@ -3679,6 +3694,9 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
adev->asic_type == CHIP_NAVI10 ||
adev->asic_type == CHIP_NAVI14 ||
adev->asic_type == CHIP_NAVI12 ||
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ adev->asic_type == CHIP_SIENNA_CICHLID ||
+#endif
adev->asic_type == CHIP_RENOIR ||
adev->asic_type == CHIP_RAVEN) {
/* Fill GFX9 params */
@@ -3698,6 +3716,11 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
tiling_info->gfx9.shaderEnable = 1;
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ if (adev->asic_type == CHIP_SIENNA_CICHLID)
+ tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
+
+#endif
ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
plane_size, tiling_info,
tiling_flags, dcc, address,
@@ -4546,10 +4569,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
stream->use_vsc_sdp_for_colorimetry =
aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
} else {
- if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
- stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
+ if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
stream->use_vsc_sdp_for_colorimetry = true;
- }
}
mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
}
@@ -5024,7 +5045,8 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
struct drm_connector *connector = &aconnector->base;
struct amdgpu_device *adev = connector->dev->dev_private;
struct dc_stream_state *stream;
- int requested_bpc = connector->state ? connector->state->max_requested_bpc : 8;
+ const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
+ int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
enum dc_status dc_result = DC_OK;
do {
@@ -5039,11 +5061,12 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
dc_result = dc_validate_stream(adev->dm.dc, stream);
if (dc_result != DC_OK) {
- DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
+ DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
drm_mode->hdisplay,
drm_mode->vdisplay,
drm_mode->clock,
- dc_result);
+ dc_result,
+ dc_status_to_str(dc_result));
dc_stream_release(stream);
stream = NULL;
@@ -5424,7 +5447,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
mst_mgr,
mst_port,
dm_new_connector_state->pbn,
- 0);
+ dm_mst_get_pbn_divider(aconnector->dc_link));
if (dm_new_connector_state->vcpi_slots < 0) {
DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
return dm_new_connector_state->vcpi_slots;
@@ -5536,7 +5559,7 @@ dm_drm_plane_duplicate_state(struct drm_plane *plane)
return &dm_plane_state->base;
}
-void dm_drm_plane_destroy_state(struct drm_plane *plane,
+static void dm_drm_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
@@ -5665,6 +5688,17 @@ static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
amdgpu_bo_unref(&rbo);
}
+static int dm_plane_helper_check_state(struct drm_plane_state *state,
+ struct drm_crtc_state *new_crtc_state)
+{
+ int max_downscale = 0;
+ int max_upscale = INT_MAX;
+
+ /* TODO: These should be checked against DC plane caps */
+ return drm_atomic_helper_check_plane_state(
+ state, new_crtc_state, max_downscale, max_upscale, true, true);
+}
+
static int dm_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
@@ -5672,6 +5706,7 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
struct dc *dc = adev->dm.dc;
struct dm_plane_state *dm_plane_state;
struct dc_scaling_info scaling_info;
+ struct drm_crtc_state *new_crtc_state;
int ret;
dm_plane_state = to_dm_plane_state(state);
@@ -5679,6 +5714,15 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
if (!dm_plane_state->dc_state)
return 0;
+ new_crtc_state =
+ drm_atomic_get_new_crtc_state(state->state, state->crtc);
+ if (!new_crtc_state)
+ return -EINVAL;
+
+ ret = dm_plane_helper_check_state(state, new_crtc_state);
+ if (ret)
+ return ret;
+
ret = fill_dc_scaling_info(state, &scaling_info);
if (ret)
return ret;
@@ -8195,6 +8239,10 @@ static int dm_update_plane_state(struct dc *dc,
if (!needs_reset)
return 0;
+ ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
+ if (ret)
+ return ret;
+
WARN_ON(dm_new_plane_state->dc_state);
dc_new_plane_state = dc_create_plane_state(dc);
@@ -8477,7 +8525,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_plane_state *old_plane_state, *new_plane_state;
enum surface_update_type update_type = UPDATE_TYPE_FAST;
enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
-
+ enum dc_status status;
int ret, i;
/*
@@ -8689,8 +8737,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
ret = drm_dp_mst_atomic_check(state);
if (ret)
goto fail;
-
- if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
+ status = dc_validate_global_state(dc, dm_state->context, false);
+ if (status != DC_OK) {
+ DC_LOG_WARNING("DC global validation failure: %s (%d)",
+ dc_status_to_str(status), status);
ret = -EINVAL;
goto fail;
}
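
Note on the dm_plane_helper_check_state() hunk above: drm_atomic_helper_check_plane_state() takes its min_scale/max_scale arguments as 16.16 fixed-point src-to-dest ratios, so passing 0 and INT_MAX (the max_downscale/max_upscale locals) effectively disables the scaling checks until the TODO of consulting the DC plane caps is addressed. A minimal sketch of how caps could later be folded in, using illustrative downscale/upscale factors rather than the real DC capability fields:

	/* Sketch only -- the factor parameters and their source are illustrative,
	 * not the actual DC plane caps. Relies on <drm/drm_atomic_helper.h>.
	 */
	static int dm_plane_check_state_with_caps_sketch(struct drm_plane_state *state,
			struct drm_crtc_state *new_crtc_state,
			int max_downscale_factor, int max_upscale_factor)
	{
		/* DRM scale = src/dst in 16.16 fixed point; >1<<16 means downscaling. */
		int min_scale = (1 << 16) / max_upscale_factor;  /* tightest upscale   */
		int max_scale = max_downscale_factor << 16;      /* tightest downscale */

		return drm_atomic_helper_check_plane_state(state, new_crtc_state,
							   min_scale, max_scale,
							   true, true);
	}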
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index d61186ff411d..86c132ddc452 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -139,10 +139,12 @@ struct amdgpu_dm_backlight_caps {
* @backlight_link: Link on which to control backlight
* @backlight_caps: Capabilities of the backlight device
* @freesync_module: Module handling freesync calculations
+ * @hdcp_workqueue: AMDGPU content protection queue
* @fw_dmcu: Reference to DMCU firmware
* @dmcu_fw_version: Version of the DMCU firmware
* @soc_bounding_box: SOC bounding box values provided by gpu_info FW
* @cached_state: Caches device atomic state for suspend/resume
+ * @cached_dc_state: Cached state of content streams
* @compressor: Frame buffer compression buffer. See &struct dm_comressor_info
*/
struct amdgpu_display_manager {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index 4dfb6b55bb2e..b321ff654df4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -195,10 +195,13 @@ static int __set_legacy_tf(struct dc_transfer_func *func,
bool has_rom)
{
struct dc_gamma *gamma = NULL;
+ struct calculate_buffer cal_buffer = {0};
bool res;
ASSERT(lut && lut_size == MAX_COLOR_LEGACY_LUT_ENTRIES);
+ cal_buffer.buffer_index = -1;
+
gamma = dc_create_gamma();
if (!gamma)
return -ENOMEM;
@@ -208,7 +211,7 @@ static int __set_legacy_tf(struct dc_transfer_func *func,
__drm_lut_to_dc_gamma(lut, gamma, true);
res = mod_color_calculate_regamma_params(func, gamma, true, has_rom,
- NULL);
+ NULL, &cal_buffer);
dc_gamma_release(&gamma);
@@ -221,10 +224,13 @@ static int __set_output_tf(struct dc_transfer_func *func,
bool has_rom)
{
struct dc_gamma *gamma = NULL;
+ struct calculate_buffer cal_buffer = {0};
bool res;
ASSERT(lut && lut_size == MAX_COLOR_LUT_ENTRIES);
+ cal_buffer.buffer_index = -1;
+
gamma = dc_create_gamma();
if (!gamma)
return -ENOMEM;
@@ -248,7 +254,7 @@ static int __set_output_tf(struct dc_transfer_func *func,
*/
gamma->type = GAMMA_CS_TFM_1D;
res = mod_color_calculate_regamma_params(func, gamma, false,
- has_rom, NULL);
+ has_rom, NULL, &cal_buffer);
}
dc_gamma_release(&gamma);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 076af267b488..db4fab10a0c4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -859,8 +859,8 @@ static int hdcp_sink_capability_show(struct seq_file *m, void *data)
seq_printf(m, "%s:%d HDCP version: ", connector->name, connector->base.id);
- hdcp_cap = dc_link_is_hdcp14(aconnector->dc_link);
- hdcp2_cap = dc_link_is_hdcp22(aconnector->dc_link);
+ hdcp_cap = dc_link_is_hdcp14(aconnector->dc_link, aconnector->dc_sink->sink_signal);
+ hdcp2_cap = dc_link_is_hdcp22(aconnector->dc_link, aconnector->dc_sink->sink_signal);
if (hdcp_cap)
@@ -1058,7 +1058,6 @@ static const struct {
{"link_settings", &dp_link_settings_debugfs_fops},
{"phy_settings", &dp_phy_settings_debugfs_fop},
{"test_pattern", &dp_phy_test_pattern_fops},
- {"output_bpc", &output_bpc_fops},
{"vrr_range", &vrr_range_fops},
#ifdef CONFIG_DRM_AMD_DC_HDCP
{"hdcp_sink_capability", &hdcp_sink_capability_fops},
@@ -1142,6 +1141,9 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
&force_yuv420_output_fops);
+ debugfs_create_file("output_bpc", 0644, dir, connector,
+ &output_bpc_fops);
+
connector->debugfs_dpcd_address = 0;
connector->debugfs_dpcd_size = 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index dcf84a61de37..694c5bc93665 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -390,6 +390,42 @@ void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
kfree(hdcp_work);
}
+
+static bool enable_assr(void *handle, struct dc_link *link)
+{
+
+ struct hdcp_workqueue *hdcp_work = handle;
+ struct mod_hdcp hdcp = hdcp_work->hdcp;
+ struct psp_context *psp = hdcp.config.psp.handle;
+ struct ta_dtm_shared_memory *dtm_cmd;
+ bool res = true;
+
+ if (!psp->dtm_context.dtm_initialized) {
+ DRM_INFO("Failed to enable ASSR, DTM TA is not initialized.");
+ return false;
+ }
+
+ dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+
+ mutex_lock(&psp->dtm_context.mutex);
+ memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
+
+ dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE;
+ dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index = link->link_enc_hw_inst;
+ dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
+
+ psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
+ DRM_INFO("Failed to enable ASSR");
+ res = false;
+ }
+
+ mutex_unlock(&psp->dtm_context.mutex);
+
+ return res;
+}
+
static void update_config(void *handle, struct cp_psp_stream_config *config)
{
struct hdcp_workqueue *hdcp_work = handle;
@@ -510,8 +546,10 @@ static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin
srm = psp_get_srm(work->hdcp.config.psp.handle, &srm_version, &srm_size);
- if (!srm)
- return -EINVAL;
+ if (!srm) {
+ ret = -EINVAL;
+ goto ret;
+ }
if (pos >= srm_size)
ret = 0;
@@ -599,6 +637,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
}
cp_psp->funcs.update_stream_config = update_config;
+ cp_psp->funcs.enable_assr = enable_assr;
cp_psp->handle = hdcp_work;
/* File created at /sys/class/drm/card0/device/hdcp_srm*/
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index b086d5c906e0..d839eb14e3f0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -627,3 +627,23 @@ void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
{
/* TODO: something */
}
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+
+void *dm_helpers_allocate_gpu_mem(
+ struct dc_context *ctx,
+ enum dc_gpu_mem_alloc_type type,
+ size_t size,
+ long long *addr)
+{
+ // TODO
+ return NULL;
+}
+
+void dm_helpers_free_gpu_mem(
+ struct dc_context *ctx,
+ enum dc_gpu_mem_alloc_type type,
+ void *pvMem)
+{
+ // TODO
+}
+#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index ae0a7ef1d595..cf15248739f7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -522,6 +522,8 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
int link_timeslots_used;
int fair_pbn_alloc;
+ pbn_per_timeslot = dm_mst_get_pbn_divider(dc_link);
+
for (i = 0; i < count; i++) {
if (vars[i].dsc_enabled) {
initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn;
@@ -533,9 +535,6 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
}
}
- pbn_per_timeslot = dc_link_bandwidth_kbps(dc_link,
- dc_link_get_link_cap(dc_link)) / (8 * 1000 * 54);
-
while (remaining_to_increase) {
next_index = -1;
min_initial_slack = -1;
@@ -564,7 +563,7 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn,
- dm_mst_get_pbn_divider(dc_link)) < 0)
+ pbn_per_timeslot) < 0)
return;
if (!drm_dp_mst_atomic_check(state)) {
vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
@@ -574,7 +573,7 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn,
- dm_mst_get_pbn_divider(dc_link)) < 0)
+ pbn_per_timeslot) < 0)
return;
}
} else {
@@ -583,7 +582,7 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn,
- dm_mst_get_pbn_divider(dc_link)) < 0)
+ pbn_per_timeslot) < 0)
return;
if (!drm_dp_mst_atomic_check(state)) {
vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
@@ -593,7 +592,7 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn,
- dm_mst_get_pbn_divider(dc_link)) < 0)
+ pbn_per_timeslot) < 0)
return;
}
}
@@ -647,7 +646,7 @@ static void try_disable_dsc(struct drm_atomic_state *state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn,
- 0) < 0)
+ dm_mst_get_pbn_divider(dc_link)) < 0)
return;
if (!drm_dp_mst_atomic_check(state)) {
@@ -718,7 +717,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
params[i].port->mgr,
params[i].port,
vars[i].pbn,
- 0) < 0)
+ dm_mst_get_pbn_divider(dc_link)) < 0)
return false;
}
if (!drm_dp_mst_atomic_check(state)) {
@@ -746,7 +745,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
params[i].port->mgr,
params[i].port,
vars[i].pbn,
- 0) < 0)
+ dm_mst_get_pbn_divider(dc_link)) < 0)
return false;
}
}
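
Note on the hunks above: the literal 0 previously passed as the PBN divider and the open-coded bandwidth formula are both replaced by dm_mst_get_pbn_divider(dc_link), and the value is hoisted into pbn_per_timeslot so it is computed once rather than on every iteration of the retry loops. Judging from the formula removed in increase_dsc_bpp(), the helper is presumably equivalent to the following sketch (an assumption, not the verbatim helper):

	/* Assumed behaviour of dm_mst_get_pbn_divider(): one PBN unit is 54/64 MB/s
	 * and an MST link carries 64 time slots, so converting the link bandwidth
	 * from kbps to MB/s (divide by 8, then by 1000) and dividing by 54 gives
	 * the PBN available per time slot.
	 */
	static int pbn_per_timeslot_sketch(struct dc_link *dc_link)
	{
		return dc_link_bandwidth_kbps(dc_link, dc_link_get_link_cap(dc_link)) /
		       (8 * 1000 * 54);
	}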
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index a2e1a73f66b8..c5f2216e59c4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -106,7 +106,7 @@ bool dm_pp_apply_display_requirements(
adev->powerplay.pp_funcs->display_configuration_change(
adev->powerplay.pp_handle,
&adev->pm.pm_display_cfg);
- else
+ else if (adev->smu.ppt_funcs)
smu_display_configuration_change(smu,
&adev->pm.pm_display_cfg);
@@ -530,6 +530,8 @@ bool dm_pp_get_static_clocks(
&pp_clk_info);
else if (adev->smu.ppt_funcs)
ret = smu_get_current_clocks(&adev->smu, &pp_clk_info);
+ else
+ return false;
if (ret)
return false;
@@ -590,7 +592,7 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
&wm_with_clock_ranges);
- else
+ else if (adev->smu.ppt_funcs)
smu_set_watermarks_for_clock_ranges(&adev->smu,
&wm_with_clock_ranges);
}
@@ -660,7 +662,7 @@ void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz)
pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz);
}
-enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
+static enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
struct pp_smu_wm_range_sets *ranges)
{
const struct dc_context *ctx = pp->dm;
@@ -728,7 +730,7 @@ enum pp_smu_status pp_nv_set_pme_wa_enable(struct pp_smu *pp)
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
+static enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
@@ -744,7 +746,8 @@ enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
+static enum pp_smu_status
+pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
@@ -760,7 +763,7 @@ enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
+static enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
struct pp_smu *pp, int mhz)
{
const struct dc_context *ctx = pp->dm;
@@ -783,7 +786,8 @@ enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
+static enum pp_smu_status
+pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
@@ -805,7 +809,7 @@ enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status pp_nv_set_pstate_handshake_support(
+static enum pp_smu_status pp_nv_set_pstate_handshake_support(
struct pp_smu *pp, BOOLEAN pstate_handshake_supported)
{
const struct dc_context *ctx = pp->dm;
@@ -818,7 +822,7 @@ enum pp_smu_status pp_nv_set_pstate_handshake_support(
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
+static enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
enum pp_smu_nv_clock_id clock_id, int mhz)
{
const struct dc_context *ctx = pp->dm;
@@ -853,7 +857,7 @@ enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
+static enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
struct pp_smu *pp, struct pp_smu_nv_clock_table *max_clocks)
{
const struct dc_context *ctx = pp->dm;
@@ -872,7 +876,7 @@ enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
return PP_SMU_RESULT_FAIL;
}
-enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
+static enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
unsigned int *clock_values_in_khz, unsigned int *num_states)
{
const struct dc_context *ctx = pp->dm;
@@ -892,7 +896,7 @@ enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
return PP_SMU_RESULT_FAIL;
}
-enum pp_smu_status pp_rn_get_dpm_clock_table(
+static enum pp_smu_status pp_rn_get_dpm_clock_table(
struct pp_smu *pp, struct dpm_clocks *clock_table)
{
const struct dc_context *ctx = pp->dm;
@@ -911,7 +915,7 @@ enum pp_smu_status pp_rn_get_dpm_clock_table(
return PP_SMU_RESULT_FAIL;
}
-enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
+static enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
struct pp_smu_wm_range_sets *ranges)
{
const struct dc_context *ctx = pp->dm;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 022da5d45d4d..51f57420fadd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -47,29 +47,4 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line)
{
}
-bool dm_write_persistent_data(struct dc_context *ctx,
- const struct dc_sink *sink,
- const char *module_name,
- const char *key_name,
- void *params,
- unsigned int size,
- struct persistent_data_flag *flag)
-{
- /*TODO implement*/
- return false;
-}
-
-bool dm_read_persistent_data(struct dc_context *ctx,
- const struct dc_sink *sink,
- const char *module_name,
- const char *key_name,
- void *params,
- unsigned int size,
- struct persistent_data_flag *flag)
-{
- /*TODO implement*/
- return false;
-}
-
/**** power component interfaces ****/
-
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 6e3dddc73246..e0f4f1be1618 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -32,6 +32,10 @@ DC_LIBS += dcn10 dml
DC_LIBS += dcn21
endif
+ifdef CONFIG_DRM_AMD_DC_DCN3_0
+DC_LIBS += dcn30
+endif
+
DC_LIBS += dce120
DC_LIBS += dce112
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 37fa7b48250e..b8684131151d 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1154,6 +1154,9 @@ static enum bp_result bios_parser_get_firmware_info(
result = get_firmware_info_v3_2(bp, info);
break;
case 3:
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ case 4:
+#endif
result = get_firmware_info_v3_2(bp, info);
break;
default:
@@ -1375,6 +1378,63 @@ static struct atom_encoder_caps_record *get_encoder_cap_record(
return NULL;
}
+static enum bp_result get_vram_info_v23(
+ struct bios_parser *bp,
+ struct dc_vram_info *info)
+{
+ struct atom_vram_info_header_v2_3 *info_v23;
+ enum bp_result result = BP_RESULT_OK;
+
+ info_v23 = GET_IMAGE(struct atom_vram_info_header_v2_3,
+ DATA_TABLES(vram_info));
+
+ if (info_v23 == NULL)
+ return BP_RESULT_BADBIOSTABLE;
+
+ info->num_chans = info_v23->vram_module[0].channel_num;
+ info->dram_channel_width_bytes = (1 << info_v23->vram_module[0].channel_width) / 8;
+
+ return result;
+}
+
+static enum bp_result get_vram_info_v24(
+ struct bios_parser *bp,
+ struct dc_vram_info *info)
+{
+ struct atom_vram_info_header_v2_4 *info_v24;
+ enum bp_result result = BP_RESULT_OK;
+
+ info_v24 = GET_IMAGE(struct atom_vram_info_header_v2_4,
+ DATA_TABLES(vram_info));
+
+ if (info_v24 == NULL)
+ return BP_RESULT_BADBIOSTABLE;
+
+ info->num_chans = info_v24->vram_module[0].channel_num;
+ info->dram_channel_width_bytes = (1 << info_v24->vram_module[0].channel_width) / 8;
+
+ return result;
+}
+
+static enum bp_result get_vram_info_v25(
+ struct bios_parser *bp,
+ struct dc_vram_info *info)
+{
+ struct atom_vram_info_header_v2_5 *info_v25;
+ enum bp_result result = BP_RESULT_OK;
+
+ info_v25 = GET_IMAGE(struct atom_vram_info_header_v2_5,
+ DATA_TABLES(vram_info));
+
+ if (info_v25 == NULL)
+ return BP_RESULT_BADBIOSTABLE;
+
+ info->num_chans = info_v25->vram_module[0].channel_num;
+ info->dram_channel_width_bytes = (1 << info_v25->vram_module[0].channel_width) / 8;
+
+ return result;
+}
+
/*
* get_integrated_info_v11
*
@@ -1666,6 +1726,46 @@ static enum bp_result construct_integrated_info(
return result;
}
+static enum bp_result bios_parser_get_vram_info(
+ struct dc_bios *dcb,
+ struct dc_vram_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ enum bp_result result = BP_RESULT_BADBIOSTABLE;
+ struct atom_common_table_header *header;
+ struct atom_data_revision revision;
+
+ if (info && DATA_TABLES(vram_info)) {
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(vram_info));
+
+ get_atom_data_table_revision(header, &revision);
+
+ switch (revision.major) {
+ case 2:
+ switch (revision.minor) {
+ case 3:
+ result = get_vram_info_v23(bp, info);
+ break;
+ case 4:
+ result = get_vram_info_v24(bp, info);
+ break;
+ case 5:
+ result = get_vram_info_v25(bp, info);
+ break;
+ default:
+ break;
+ }
+ break;
+
+ default:
+ return result;
+ }
+
+ }
+ return result;
+}
+
static struct integrated_info *bios_parser_create_integrated_info(
struct dc_bios *dcb)
{
@@ -1877,6 +1977,108 @@ static enum bp_result bios_get_board_layout_info(
return BP_RESULT_OK;
}
+
+static uint16_t bios_parser_pack_data_tables(
+ struct dc_bios *dcb,
+ void *dst)
+{
+#ifdef PACK_BIOS_DATA
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct atom_rom_header_v2_2 *rom_header = NULL;
+ struct atom_rom_header_v2_2 *packed_rom_header = NULL;
+ struct atom_common_table_header *data_tbl_header = NULL;
+ struct atom_master_list_of_data_tables_v2_1 *data_tbl_list = NULL;
+ struct atom_master_data_table_v2_1 *packed_master_data_tbl = NULL;
+ struct atom_data_revision tbl_rev = {0};
+ uint16_t *rom_header_offset = NULL;
+ const uint8_t *bios = bp->base.bios;
+ uint8_t *bios_dst = (uint8_t *)dst;
+ uint16_t packed_rom_header_offset;
+ uint16_t packed_masterdatatable_offset;
+ uint16_t packed_data_tbl_offset;
+ uint16_t data_tbl_offset;
+ unsigned int i;
+
+ rom_header_offset =
+ GET_IMAGE(uint16_t, OFFSET_TO_ATOM_ROM_HEADER_POINTER);
+
+ if (!rom_header_offset)
+ return 0;
+
+ rom_header = GET_IMAGE(struct atom_rom_header_v2_2, *rom_header_offset);
+
+ if (!rom_header)
+ return 0;
+
+ get_atom_data_table_revision(&rom_header->table_header, &tbl_rev);
+ if (!(tbl_rev.major >= 2 && tbl_rev.minor >= 2))
+ return 0;
+
+ get_atom_data_table_revision(&bp->master_data_tbl->table_header, &tbl_rev);
+ if (!(tbl_rev.major >= 2 && tbl_rev.minor >= 1))
+ return 0;
+
+ packed_rom_header_offset =
+ OFFSET_TO_ATOM_ROM_HEADER_POINTER + sizeof(*rom_header_offset);
+
+ packed_masterdatatable_offset =
+ packed_rom_header_offset + rom_header->table_header.structuresize;
+
+ packed_data_tbl_offset =
+ packed_masterdatatable_offset +
+ bp->master_data_tbl->table_header.structuresize;
+
+ packed_rom_header =
+ (struct atom_rom_header_v2_2 *)(bios_dst + packed_rom_header_offset);
+
+ packed_master_data_tbl =
+ (struct atom_master_data_table_v2_1 *)(bios_dst +
+ packed_masterdatatable_offset);
+
+ memcpy(bios_dst, bios, OFFSET_TO_ATOM_ROM_HEADER_POINTER);
+
+ *((uint16_t *)(bios_dst + OFFSET_TO_ATOM_ROM_HEADER_POINTER)) =
+ packed_rom_header_offset;
+
+ memcpy(bios_dst + packed_rom_header_offset, rom_header,
+ rom_header->table_header.structuresize);
+
+ packed_rom_header->masterdatatable_offset = packed_masterdatatable_offset;
+
+ memcpy(&packed_master_data_tbl->table_header,
+ &bp->master_data_tbl->table_header,
+ sizeof(bp->master_data_tbl->table_header));
+
+ data_tbl_list = &bp->master_data_tbl->listOfdatatables;
+
+ /* Each data table offset in data table list is 2 bytes,
+ * we can use that to iterate through listOfdatatables
+ * without knowing the name of each member.
+ */
+ for (i = 0; i < sizeof(*data_tbl_list)/sizeof(uint16_t); i++) {
+ data_tbl_offset = *((uint16_t *)data_tbl_list + i);
+
+ if (data_tbl_offset) {
+ data_tbl_header =
+ (struct atom_common_table_header *)(bios + data_tbl_offset);
+
+ memcpy(bios_dst + packed_data_tbl_offset, data_tbl_header,
+ data_tbl_header->structuresize);
+
+ *((uint16_t *)&packed_master_data_tbl->listOfdatatables + i) =
+ packed_data_tbl_offset;
+
+ packed_data_tbl_offset += data_tbl_header->structuresize;
+ } else {
+ *((uint16_t *)&packed_master_data_tbl->listOfdatatables + i) = 0;
+ }
+ }
+ return packed_data_tbl_offset;
+#endif
+ // TODO: There is data bytes alignment issue, disable it for now.
+ return 0;
+}
+
static const struct dc_vbios_funcs vbios_funcs = {
.get_connectors_number = bios_parser_get_connectors_number,
@@ -1925,6 +2127,7 @@ static const struct dc_vbios_funcs vbios_funcs = {
.bios_parser_destroy = firmware_parser_destroy,
.get_board_layout_info = bios_get_board_layout_info,
+ .pack_data_tables = bios_parser_pack_data_tables,
};
static bool bios_parser2_construct(
@@ -2006,6 +2209,7 @@ static bool bios_parser2_construct(
bp->base.integrated_info = bios_parser_create_integrated_info(&bp->base);
bp->base.fw_info_valid = bios_parser_get_firmware_info(&bp->base, &bp->base.fw_info) == BP_RESULT_OK;
+ bios_parser_get_vram_info(&bp->base, &bp->base.vram_info);
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 204d7942a6e5..21ff6b686f5f 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -65,6 +65,11 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case DCN_VERSION_3_0:
+ *h = dal_cmd_tbl_helper_dce112_get_table2();
+ return true;
+#endif
default:
/* Unsupported DCE */
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
index 755b6e33140a..bf0affef893f 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
@@ -388,3 +388,43 @@ const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table2(void)
{
return &command_table_helper_funcs;
}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+/* function table */
+static const struct command_table_helper command_table_helper_funcs_dcn2x = {
+ .controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom2,
+ .encoder_action_to_atom = encoder_action_to_atom,
+ .engine_bp_to_atom = engine_bp_to_atom,
+ .clock_source_id_to_atom = clock_source_id_to_atom,
+ .clock_source_id_to_atom_phy_clk_src_id =
+ clock_source_id_to_atom_phy_clk_src_id,
+ .signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
+ .hpd_sel_to_atom = hpd_sel_to_atom,
+ .dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
+ .phy_id_to_atom = phy_id_to_atom,
+ .disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
+ .clock_source_id_to_ref_clk_src = NULL,
+ .transmitter_bp_to_atom = NULL,
+ .encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom2,
+ .encoder_mode_bp_to_atom =
+ dal_cmd_table_helper_encoder_mode_bp_to_atom2,
+ .dc_clock_type_to_atom = dc_clock_type_to_atom,
+ .transmitter_color_depth_to_atom = transmitter_color_depth_to_atom,
+
+};
+
+/*
+ * dal_cmd_tbl_helper_dce110_get_table
+ *
+ * @brief
+ * Initialize command table helper functions
+ *
+ * @param
+ * const struct command_table_helper **h - [out] struct of functions
+ *
+ */
+const struct command_table_helper *dal_cmd_tbl_helper_dcn2_get_table2(void)
+{
+ return &command_table_helper_funcs_dcn2x;
+}
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h
index abf28a06f5bc..2d9e9f3c579d 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h
@@ -30,5 +30,8 @@ struct command_table_helper;
/* Initialize command table helper functions */
const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table2(void);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+const struct command_table_helper *dal_cmd_tbl_helper_dcn2_get_table2(void);
+#endif
#endif /* __DAL_COMMAND_TABLE_HELPER_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
index 1ef0074302c5..41284e263325 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
@@ -805,7 +805,7 @@ void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v)
if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) {
v->time_for_meta_pte_without_immediate_flip = dcn_bw_max3(
- v->meta_pte_bytes_frame[k] / v->prefetch_bandwidth[k],
+ v->meta_pte_bytes_frame[k] / v->prefetch_bw[k],
v->extra_latency,
v->htotal[k] / v->pixel_clock[k] / 4.0);
} else {
@@ -814,7 +814,7 @@ void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v)
if (v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes) {
v->time_for_meta_and_dpte_row_without_immediate_flip = dcn_bw_max3((
- v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bandwidth[k],
+ v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bw[k],
v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip,
v->extra_latency);
} else {
@@ -827,7 +827,7 @@ void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v)
v->lines_for_meta_and_dpte_row_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
v->maximum_vstartup = v->maximum_vstartup - 1;
- if (v->lines_for_meta_pte_without_immediate_flip[k] < 8.0 && v->lines_for_meta_and_dpte_row_without_immediate_flip[k] < 16.0)
+ if (v->lines_for_meta_pte_without_immediate_flip[k] < 32.0 && v->lines_for_meta_and_dpte_row_without_immediate_flip[k] < 16.0)
break;
} while(1);
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 1e5a92b192a1..51397b565ddf 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -302,10 +302,17 @@ static void pipe_ctx_to_e2e_pipe_params (
struct _vcs_dpi_display_pipe_params_st *input)
{
input->src.is_hsplit = false;
- if (pipe->top_pipe != NULL && pipe->top_pipe->plane_state == pipe->plane_state)
+
+ /* stereo can never be split */
+ if (pipe->plane_state->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE ||
+ pipe->plane_state->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM) {
+ /* reset the split group if it was already considered split. */
+ input->src.hsplit_grp = pipe->pipe_idx;
+ } else if (pipe->top_pipe != NULL && pipe->top_pipe->plane_state == pipe->plane_state) {
input->src.is_hsplit = true;
- else if (pipe->bottom_pipe != NULL && pipe->bottom_pipe->plane_state == pipe->plane_state)
+ } else if (pipe->bottom_pipe != NULL && pipe->bottom_pipe->plane_state == pipe->plane_state) {
input->src.is_hsplit = true;
+ }
if (pipe->plane_res.dpp->ctx->dc->debug.optimized_watermark) {
/*
@@ -374,6 +381,13 @@ static void pipe_ctx_to_e2e_pipe_params (
input->src.viewport_width_c = input->src.viewport_width;
input->src.viewport_height_c = input->src.viewport_height;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
+ input->src.source_format = dm_rgbe_alpha;
+ input->src.viewport_width_c = input->src.viewport_width;
+ input->src.viewport_height_c = input->src.viewport_height;
+ break;
+#endif
default:
input->src.source_format = dm_444_32;
input->src.viewport_width_c = input->src.viewport_width;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index c0f6a8c7de7d..6874276bb2a1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -97,3 +97,13 @@ AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DC
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
endif
+ifdef CONFIG_DRM_AMD_DC_DCN3_0
+###############################################################################
+# DCN30
+###############################################################################
+CLK_MGR_DCN30 = dcn30_clk_mgr.o dcn30_clk_mgr_smu_msg.o
+
+AMD_DAL_CLK_MGR_DCN30 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn30/,$(CLK_MGR_DCN30))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN30)
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index a5c2114e4292..f376058b5df6 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -38,6 +38,9 @@
#include "dcn10/rv2_clk_mgr.h"
#include "dcn20/dcn20_clk_mgr.h"
#include "dcn21/rn_clk_mgr.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#include "dcn30/dcn30_clk_mgr.h"
+#endif
int clk_mgr_helper_get_active_display_cnt(
@@ -169,6 +172,15 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
break;
case FAMILY_NV:
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ if (ASICREV_IS_SIENNA_CICHLID_P(asic_id.hw_internal_rev)) {
+ /* TODO: to add SIENNA_CICHLID clk_mgr support, once CLK IP header files are available,
+ * for now use DCN3AG clk mgr.
+ */
+ dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+ break;
+ }
+#endif
dcn20_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
break;
#endif /* Family RV and NV*/
@@ -184,6 +196,16 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+
+ switch (clk_mgr_base->ctx->asic_id.chip_family) {
+ case FAMILY_NV:
+ if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
+ dcn3_clk_mgr_destroy(clk_mgr);
+ break;
+ }
+ }
+#endif
kfree(clk_mgr);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 55d09adbf0d9..c63ec960e116 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -234,20 +234,26 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
dpp_clock_lowered = true;
clk_mgr->base.clks.dppclk_khz = new_clocks->dppclk_khz;
- if (pp_smu && pp_smu->set_voltage_by_freq)
- pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000);
-
update_dppclk = true;
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
- if (pp_smu && pp_smu->set_voltage_by_freq)
- pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000);
update_dispclk = true;
}
+ if (update_dppclk || update_dispclk) {
+ new_clocks->disp_dpp_voltage_level_khz = new_clocks->dppclk_khz;
+
+ if (update_dispclk)
+ new_clocks->disp_dpp_voltage_level_khz = new_clocks->dispclk_khz > new_clocks->dppclk_khz ? new_clocks->dispclk_khz : new_clocks->dppclk_khz;
+
+ clk_mgr_base->clks.disp_dpp_voltage_level_khz = new_clocks->disp_dpp_voltage_level_khz;
+ if (pp_smu && pp_smu->set_voltage_by_freq)
+ pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.disp_dpp_voltage_level_khz / 1000);
+ }
+
if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
if (dpp_clock_lowered) {
// if clock is being lowered, increase DTO before lowering refclk
@@ -403,6 +409,8 @@ static bool dcn2_are_clock_states_equal(struct dc_clocks *a,
return false;
else if (a->dppclk_khz != b->dppclk_khz)
return false;
+ else if (a->disp_dpp_voltage_level_khz != b->disp_dpp_voltage_level_khz)
+ return false;
else if (a->dcfclk_khz != b->dcfclk_khz)
return false;
else if (a->socclk_khz != b->socclk_khz)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 24c5765890fa..39788a7bd003 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -153,8 +153,9 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
}
// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
+ // Do not adjust dppclk if dppclk is 0 to avoid unexpected result
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
- if (new_clocks->dppclk_khz < 100000)
+ if (new_clocks->dppclk_khz < 100000 && new_clocks->dppclk_khz > 0)
new_clocks->dppclk_khz = 100000;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h
new file mode 100644
index 000000000000..5ed03287aaaf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+// TEMPORARY until this exists in the proper location
+#ifndef DALSMC_H
+#define DALSMC_H
+
+#define DALSMC_VERSION 0x1
+
+// SMU Response Codes:
+#define DALSMC_Result_OK 0x1
+#define DALSMC_Result_Failed 0xFF
+#define DALSMC_Result_UnknownCmd 0xFE
+#define DALSMC_Result_CmdRejectedPrereq 0xFD
+#define DALSMC_Result_CmdRejectedBusy 0xFC
+
+
+
+// Message Definitions:
+#define DALSMC_MSG_TestMessage 0x1
+#define DALSMC_MSG_GetSmuVersion 0x2
+#define DALSMC_MSG_GetDriverIfVersion 0x3
+#define DALSMC_MSG_GetMsgHeaderVersion 0x4
+#define DALSMC_MSG_SetDalDramAddrHigh 0x5
+#define DALSMC_MSG_SetDalDramAddrLow 0x6
+#define DALSMC_MSG_TransferTableSmu2Dram 0x7
+#define DALSMC_MSG_TransferTableDram2Smu 0x8
+#define DALSMC_MSG_SetHardMinByFreq 0x9
+#define DALSMC_MSG_SetHardMaxByFreq 0xA
+#define DALSMC_MSG_GetDpmFreqByIndex 0xB
+#define DALSMC_MSG_GetDcModeMaxDpmFreq 0xC
+#define DALSMC_MSG_SetMinDeepSleepDcefclk 0xD
+#define DALSMC_MSG_NumOfDisplays 0xE
+#define DALSMC_MSG_SetExternalClientDfCstateAllow 0x10
+#define DALSMC_MSG_BacoAudioD3PME 0x11
+#define DALSMC_Message_Count 0x12
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
new file mode 100644
index 000000000000..b27cb52903f5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -0,0 +1,543 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dccg.h"
+#include "clk_mgr_internal.h"
+
+#include "dcn30_clk_mgr_smu_msg.h"
+#include "dcn20/dcn20_clk_mgr.h"
+#include "dce100/dce_clk_mgr.h"
+#include "reg_helper.h"
+#include "core_types.h"
+#include "dm_helpers.h"
+
+#include "atomfirmware.h"
+
+
+#include "sienna_cichlid_ip_offset.h"
+#include "dcn/dcn_3_0_0_offset.h"
+#include "dcn/dcn_3_0_0_sh_mask.h"
+
+#include "nbio/nbio_7_4_offset.h"
+
+#include "dcn/dpcs_3_0_0_offset.h"
+#include "dcn/dpcs_3_0_0_sh_mask.h"
+
+#include "mmhub/mmhub_2_0_0_offset.h"
+#include "mmhub/mmhub_2_0_0_sh_mask.h"
+/* we don't have a clk folder yet */
+#include "dcn30/dcn30_clk_mgr.h"
+
+#undef FN
+#define FN(reg_name, field_name) \
+ clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
+
+#define REG(reg) \
+ (clk_mgr->regs->reg)
+
+#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
+
+#define BASE(seg) BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+#undef CLK_SRI
+#define CLK_SRI(reg_name, block, inst)\
+ .reg_name = mm ## block ## _ ## reg_name
+
+static const struct clk_mgr_registers clk_mgr_regs = {
+ CLK_REG_LIST_DCN3()
+};
+
+static const struct clk_mgr_shift clk_mgr_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCN20_BASE(__SHIFT)
+};
+
+static const struct clk_mgr_mask clk_mgr_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCN20_BASE(_MASK)
+};
+
+
+/* Query SMU for all clock states for a particular clock */
+static void dcn3_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, unsigned int *entry_0, unsigned int *num_levels)
+{
+ unsigned int i;
+ char *entry_i = (char *)entry_0;
+ uint32_t ret = dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, 0xFF);
+
+ if (ret & (1 << 31))
+ /* fine-grained, only min and max */
+ *num_levels = 2;
+ else
+ /* discrete, a number of fixed states */
+ /* will set num_levels to 0 on failure */
+ *num_levels = ret & 0xFF;
+
+ /* if the initial message failed, num_levels will be 0 */
+ for (i = 0; i < *num_levels; i++) {
+ *((unsigned int *)entry_i) = (dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, i) & 0xFFFF);
+ entry_i += sizeof(clk_mgr->base.bw_params->clk_table.entries[0]);
+ }
+}
+
+static void dcn3_build_wm_range_table(struct clk_mgr_internal *clk_mgr)
+{
+ /* defaults */
+ double pstate_latency_us = clk_mgr->base.ctx->dc->dml.soc.dram_clock_change_latency_us;
+ double sr_exit_time_us = clk_mgr->base.ctx->dc->dml.soc.sr_exit_time_us;
+ double sr_enter_plus_exit_time_us = clk_mgr->base.ctx->dc->dml.soc.sr_enter_plus_exit_time_us;
+ uint16_t min_uclk_mhz = clk_mgr->base.bw_params->clk_table.entries[0].memclk_mhz;
+
+ /* Set A - Normal - default values*/
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].valid = true;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us = pstate_latency_us;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us = sr_exit_time_us;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = 0;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;
+
+ /* Set B - Performance - higher minimum clocks */
+// clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].valid = true;
+// clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us = pstate_latency_us;
+// clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us = sr_exit_time_us;
+// clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
+// clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
+// clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = TUNED VALUE;
+// clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_dcfclk = 0xFFFF;
+// clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_uclk = TUNED VALUE;
+// clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_uclk = 0xFFFF;
+
+ /* Set C - Dummy P-State - P-State latency set to "dummy p-state" value */
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].valid = true;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = clk_mgr->base.ctx->dc->dml.soc.dummy_pstate_latency_us;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_dcfclk = 0;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_dcfclk = 0xFFFF;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF;
+
+}
+
+void dcn3_init_clocks(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ unsigned int num_levels;
+
+ memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks));
+ clk_mgr_base->clks.p_state_change_support = true;
+ clk_mgr_base->clks.prev_p_state_change_support = true;
+ clk_mgr->smu_present = false;
+
+ if (!clk_mgr_base->bw_params)
+ return;
+
+ if (!clk_mgr_base->force_smu_not_present && dcn30_smu_get_smu_version(clk_mgr, &clk_mgr->smu_ver))
+ clk_mgr->smu_present = true;
+
+ if (!clk_mgr->smu_present)
+ return;
+
+ // do we fail if these fail? if so, how? do we not care to check?
+ dcn30_smu_check_driver_if_version(clk_mgr);
+ dcn30_smu_check_msg_header_version(clk_mgr);
+
+ /* DCFCLK */
+ dcn3_init_single_clock(clk_mgr, PPCLK_DCEFCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
+ &num_levels);
+
+ /* DTBCLK */
+ dcn3_init_single_clock(clk_mgr, PPCLK_DTBCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
+ &num_levels);
+
+ // DPREFCLK ???
+
+ /* DISPCLK */
+ dcn3_init_single_clock(clk_mgr, PPCLK_DISPCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
+ &num_levels);
+
+ /* DPPCLK */
+ dcn3_init_single_clock(clk_mgr, PPCLK_PIXCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].dppclk_mhz,
+ &num_levels);
+
+ /* PHYCLK */
+ dcn3_init_single_clock(clk_mgr, PPCLK_PHYCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].phyclk_mhz,
+ &num_levels);
+
+ /* Get UCLK, update bounding box */
+ clk_mgr_base->funcs->get_memclk_states_from_smu(clk_mgr_base);
+
+ /* WM range table */
+ dcn3_build_wm_range_table(clk_mgr);
+}
+
+static int dcn30_get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
+{
+ /* get FbMult value */
+ struct fixed31_32 pll_req;
+	/* read the raw PLL request register */
+ uint32_t pll_req_reg = REG_READ(CLK0_CLK_PLL_REQ);
+
+ /* set up a fixed-point number
+ * this works because the int part is on the right edge of the register
+ * and the frac part is on the left edge
+ */
+ pll_req = dc_fixpt_from_int(pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_int);
+ pll_req.value |= pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_frac;
+
+ /* multiply by REFCLK period */
+ pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);
+
+ return dc_fixpt_floor(pll_req);
+}
+
+static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ int display_count;
+ bool update_dppclk = false;
+ bool update_dispclk = false;
+ bool enter_display_off = false;
+ bool dpp_clock_lowered = false;
+ struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
+ bool force_reset = false;
+ bool update_uclk = false;
+
+ if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present)
+ return;
+
+ if (clk_mgr_base->clks.dispclk_khz == 0 ||
+ (dc->debug.force_clock_mode & 0x1)) {
+		/* This is from resume or boot up; if the forced_clock cfg option is used, we bypass programming dispclk and DPPCLK, but still need to set them for S3. */
+ force_reset = true;
+
+ dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
+
+		/* force_clock_mode 0x1: force reset the clock even if it is the same clock, as long as it is in Passive level. */
+ }
+ display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
+
+ if (display_count == 0)
+ enter_display_off = true;
+
+ if (enter_display_off == safe_to_lower)
+ dcn30_smu_set_num_of_displays(clk_mgr, display_count);
+
+ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz)) {
+ clk_mgr_base->clks.phyclk_khz = new_clocks->phyclk_khz;
+ dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_PHYCLK, clk_mgr_base->clks.phyclk_khz / 1000);
+ }
+
+ if (dc->debug.force_min_dcfclk_mhz > 0)
+ new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
+ new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
+
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
+ clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+ dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DCEFCLK, clk_mgr_base->clks.dcfclk_khz / 1000);
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
+ clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+ dcn30_smu_set_min_deep_sleep_dcef_clk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz / 1000);
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr_base->clks.socclk_khz))
+ /* We don't actually care about socclk, don't notify SMU of hard min */
+ clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
+
+ clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
+ if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+ clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support;
+
+ /* to disable P-State switching, set UCLK min = max */
+ if (!clk_mgr_base->clks.p_state_change_support)
+ dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
+ clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+ }
+
+ /* Always update saved value, even if new value not set due to P-State switching unsupported */
+ if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) {
+ clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;
+ update_uclk = true;
+ }
+
+ /* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
+ if (clk_mgr_base->clks.p_state_change_support &&
+ (update_uclk || !clk_mgr_base->clks.prev_p_state_change_support))
+ dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, clk_mgr_base->clks.dramclk_khz / 1000);
+
+ if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr_base->clks.dppclk_khz)) {
+ if (clk_mgr_base->clks.dppclk_khz > new_clocks->dppclk_khz)
+ dpp_clock_lowered = true;
+
+ clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
+ dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_PIXCLK, clk_mgr_base->clks.dppclk_khz / 1000);
+ update_dppclk = true;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+ clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+ dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000);
+ update_dispclk = true;
+ }
+
+ if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
+ if (dpp_clock_lowered) {
+ /* if clock is being lowered, increase DTO before lowering refclk */
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ dcn20_update_clocks_update_dentist(clk_mgr);
+ } else {
+ /* if clock is being raised, increase refclk before lowering DTO */
+ if (update_dppclk || update_dispclk)
+ dcn20_update_clocks_update_dentist(clk_mgr);
+ /* always update dtos unless clock is lowered and not safe to lower */
+ if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ }
+ }
+
+ if (update_dispclk && dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
+		/* update dmcu for wait_loop count */
+ dmcu->funcs->set_psr_wait_loop(dmcu,
+ clk_mgr_base->clks.dispclk_khz / 1000 / 7);
+}
+
+
+static void dcn3_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
+{
+ unsigned int i;
+ long long table_addr;
+ WatermarksExternal_t *table;
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ if (!clk_mgr->smu_present)
+ return;
+
+ /* need physical address of table to give to PMFW */
+ table = (WatermarksExternal_t *) dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t), &table_addr);
+
+ if (!table)
+ // should log failure
+ return;
+
+ memset(table, 0, sizeof(*table));
+
+ /* collect valid ranges, place in pmfw table */
+ for (i = 0; i < WM_SET_COUNT; i++)
+ if (clk_mgr->base.bw_params->wm_table.nv_entries[i].valid) {
+ table->Watermarks.WatermarkRow[WM_DCEFCLK][i].MinClock = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.min_dcfclk;
+ table->Watermarks.WatermarkRow[WM_DCEFCLK][i].MaxClock = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.max_dcfclk;
+ table->Watermarks.WatermarkRow[WM_DCEFCLK][i].MinUclk = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.min_uclk;
+ table->Watermarks.WatermarkRow[WM_DCEFCLK][i].MaxUclk = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.max_uclk;
+ table->Watermarks.WatermarkRow[WM_DCEFCLK][i].WmSetting = i;
+ table->Watermarks.WatermarkRow[WM_DCEFCLK][i].Flags = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.wm_type;
+ }
+
+ dcn30_smu_set_dram_addr_high(clk_mgr, table_addr >> 32);
+ dcn30_smu_set_dram_addr_low(clk_mgr, table_addr & 0xFFFFFFFF);
+ dcn30_smu_transfer_wm_table_dram_2_smu(clk_mgr);
+
+ dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART, table);
+}
+
+/* Set min memclk to minimum, either constrained by the current mode or DPM0 */
+static void dcn3_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool current_mode)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ if (!clk_mgr->smu_present)
+ return;
+
+ if (current_mode)
+ dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
+ clk_mgr_base->clks.dramclk_khz / 1000);
+ else
+ dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
+ clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz);
+}
+
+/* Set max memclk to highest DPM value */
+static void dcn3_set_hard_max_memclk(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ if (!clk_mgr->smu_present)
+ return;
+
+ dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK,
+ clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+}
+
+/* Get current memclk states, update bounding box */
+static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ unsigned int num_levels;
+
+ if (!clk_mgr->smu_present)
+ return;
+
+ /* Refresh memclk states */
+ dcn3_init_single_clock(clk_mgr, PPCLK_UCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz,
+ &num_levels);
+ clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
+
+ /* Refresh bounding box */
+ clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
+ clk_mgr->base.ctx->dc, clk_mgr_base->bw_params);
+}
+
+static bool dcn3_are_clock_states_equal(struct dc_clocks *a,
+ struct dc_clocks *b)
+{
+ if (a->dispclk_khz != b->dispclk_khz)
+ return false;
+ else if (a->dppclk_khz != b->dppclk_khz)
+ return false;
+ else if (a->dcfclk_khz != b->dcfclk_khz)
+ return false;
+ else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
+ return false;
+ else if (a->phyclk_khz != b->phyclk_khz)
+ return false;
+ else if (a->dramclk_khz != b->dramclk_khz)
+ return false;
+ else if (a->p_state_change_support != b->p_state_change_support)
+ return false;
+
+ return true;
+}
+
+static void dcn3_enable_pme_wa(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ if (!clk_mgr->smu_present)
+ return;
+
+ dcn30_smu_set_pme_workaround(clk_mgr);
+}
+
+static struct clk_mgr_funcs dcn3_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .update_clocks = dcn3_update_clocks,
+ .init_clocks = dcn3_init_clocks,
+ .notify_wm_ranges = dcn3_notify_wm_ranges,
+ .set_hard_min_memclk = dcn3_set_hard_min_memclk,
+ .set_hard_max_memclk = dcn3_set_hard_max_memclk,
+ .get_memclk_states_from_smu = dcn3_get_memclk_states_from_smu,
+ .are_clock_states_equal = dcn3_are_clock_states_equal,
+ .enable_pme_wa = dcn3_enable_pme_wa
+};
+
+static void dcn3_init_clocks_fpga(struct clk_mgr *clk_mgr)
+{
+ dcn2_init_clocks(clk_mgr);
+
+/* TODO: Implement the functions and remove the ifndef guard */
+}
+
+static struct clk_mgr_funcs dcn3_fpga_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .update_clocks = dcn2_update_clocks_fpga,
+ .init_clocks = dcn3_init_clocks_fpga,
+};
+
+/* TODO: update clk register offsets for dcn30 */
+void dcn3_clk_mgr_construct(
+ struct dc_context *ctx,
+ struct clk_mgr_internal *clk_mgr,
+ struct pp_smu_funcs *pp_smu,
+ struct dccg *dccg)
+{
+ clk_mgr->base.ctx = ctx;
+ clk_mgr->base.funcs = &dcn3_funcs;
+ clk_mgr->regs = &clk_mgr_regs;
+ clk_mgr->clk_mgr_shift = &clk_mgr_shift;
+ clk_mgr->clk_mgr_mask = &clk_mgr_mask;
+
+ clk_mgr->dccg = dccg;
+ clk_mgr->dfs_bypass_disp_clk = 0;
+
+ clk_mgr->dprefclk_ss_percentage = 0;
+ clk_mgr->dprefclk_ss_divider = 1000;
+ clk_mgr->ss_on_dprefclk = false;
+ clk_mgr->dfs_ref_freq_khz = 100000;
+
+ clk_mgr->base.dprefclk_khz = 730000; // 700 MHz planned if VCO is 3.85 GHz, will be retrieved
+
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
+ clk_mgr->base.funcs = &dcn3_fpga_funcs;
+ clk_mgr->base.dentist_vco_freq_khz = 3650000;
+
+ } else {
+ struct clk_state_registers_and_bypass s = { 0 };
+
+ /* integer part is now VCO frequency in kHz */
+ clk_mgr->base.dentist_vco_freq_khz = dcn30_get_vco_frequency_from_reg(clk_mgr);
+
+ /* in case we don't get a value from the register, use default */
+ if (clk_mgr->base.dentist_vco_freq_khz == 0)
+ clk_mgr->base.dentist_vco_freq_khz = 3650000;
+ /* Convert dprefclk units from MHz to KHz */
+ /* Value already divided by 10, some resolution lost */
+
+ /*TODO: uncomment assert once dcn3_dump_clk_registers is implemented */
+ //ASSERT(s.dprefclk != 0);
+ if (s.dprefclk != 0)
+ clk_mgr->base.dprefclk_khz = s.dprefclk * 1000;
+ }
+
+ clk_mgr->dfs_bypass_enabled = false;
+
+ clk_mgr->smu_present = false;
+
+ dce_clock_read_ss_info(clk_mgr);
+
+ clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
+}
+
+void dcn3_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
+{
+ if (clk_mgr->base.bw_params)
+ kfree(clk_mgr->base.bw_params);
+}
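
dcn30_get_vco_frequency_from_reg() above treats CLK0_CLK_PLL_REQ as a fixed-point feedback multiplier (integer bits in one field, fractional bits in another) and multiplies it by the DFS reference clock. A rough floating-point illustration of the same arithmetic (the driver itself uses fixed31_32 helpers and the register masks; the values here are only an example):

#include <stdio.h>

/* Illustration only: with a 100000 kHz DFS reference and FbMult = 36.5,
 * the VCO frequency works out to 3650000 kHz, i.e. the same 3.65 GHz used
 * as the fallback in dcn3_clk_mgr_construct(). */
static unsigned int vco_freq_khz(double fb_mult_int, double fb_mult_frac,
				 unsigned int dfs_ref_freq_khz)
{
	return (unsigned int)((fb_mult_int + fb_mult_frac) * dfs_ref_freq_khz);
}

int main(void)
{
	printf("%u kHz\n", vco_freq_khz(36.0, 0.5, 100000));
	return 0;
}
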
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.h
index b6deb8e2590f..dd4a0bd72458 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2019 Advanced Micro Devices, Inc.
+ * Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -23,19 +23,16 @@
*
*/
-#ifndef _DMUB_CMD_VBIOS_H_
-#define _DMUB_CMD_VBIOS_H_
+#ifndef __DCN30_CLK_MGR_H__
+#define __DCN30_CLK_MGR_H__
-/*
- * Command IDs should be treated as stable ABI.
- * Do not reuse or modify IDs.
- */
+void dcn3_init_clocks(struct clk_mgr *clk_mgr_base);
+
+void dcn3_clk_mgr_construct(struct dc_context *ctx,
+ struct clk_mgr_internal *clk_mgr,
+ struct pp_smu_funcs *pp_smu,
+ struct dccg *dccg);
-enum dmub_cmd_vbios_type {
- DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL = 0,
- DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL = 1,
- DMUB_CMD__VBIOS_SET_PIXEL_CLOCK = 2,
- DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING = 3,
-};
+void dcn3_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr);
-#endif /* _DMUB_CMD_VBIOS_H_ */
+#endif //__DCN30_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
new file mode 100644
index 000000000000..986c53a3b6a8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/delay.h>
+#include "dcn30_clk_mgr_smu_msg.h"
+
+#include "clk_mgr_internal.h"
+#include "reg_helper.h"
+#include "dalsmc.h"
+
+#define mmDAL_MSG_REG 0x1628A
+#define mmDAL_ARG_REG 0x16273
+#define mmDAL_RESP_REG 0x16274
+
+#define REG(reg_name) \
+ mm ## reg_name
+
+/*
+ * Function to be used instead of REG_WAIT macro because the wait ends when
+ * the register is NOT EQUAL to zero, and because the translation in msg_if.h
+ * won't work with REG_WAIT.
+ */
+static uint32_t dcn30_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries)
+{
+ uint32_t reg = 0;
+
+ do {
+ reg = REG_READ(DAL_RESP_REG);
+ if (reg)
+ break;
+
+ if (delay_us >= 1000)
+ msleep(delay_us/1000);
+ else if (delay_us > 0)
+ udelay(delay_us);
+ } while (max_retries--);
+
+ /* handle DALSMC_Result_CmdRejectedBusy? */
+
+ /* Log? */
+
+ return reg;
+}
+
+static bool dcn30_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uint32_t msg_id, uint32_t param_in, uint32_t *param_out)
+{
+ /* Wait for response register to be ready */
+ dcn30_smu_wait_for_response(clk_mgr, 10, 200000);
+
+ /* Clear response register */
+ REG_WRITE(DAL_RESP_REG, 0);
+
+ /* Set the parameter register for the SMU message */
+ REG_WRITE(DAL_ARG_REG, param_in);
+
+ /* Trigger the message transaction by writing the message ID */
+ REG_WRITE(DAL_MSG_REG, msg_id);
+
+ /* Wait for response */
+ if (dcn30_smu_wait_for_response(clk_mgr, 10, 200000) == DALSMC_Result_OK) {
+ if (param_out)
+ *param_out = REG_READ(DAL_ARG_REG);
+
+ return true;
+ }
+
+ return false;
+}
+
+/* Test message should return input + 1 */
+bool dcn30_smu_test_message(struct clk_mgr_internal *clk_mgr, uint32_t input)
+{
+ uint32_t response = 0;
+
+ if (dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_TestMessage, input, &response))
+ if (response == input + 1)
+ return true;
+
+ return false;
+}
+
+bool dcn30_smu_get_smu_version(struct clk_mgr_internal *clk_mgr, unsigned int *version)
+{
+ if (dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetSmuVersion, 0, version))
+ return true;
+
+ return false;
+}
+
+/* Message output should match SMU11_DRIVER_IF_VERSION in smu11_driver_if.h */
+bool dcn30_smu_check_driver_if_version(struct clk_mgr_internal *clk_mgr)
+{
+ uint32_t response = 0;
+
+ if (dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetDriverIfVersion, 0, &response))
+ if (response == SMU11_DRIVER_IF_VERSION)
+ return true;
+
+ return false;
+}
+
+/* Message output should match DALSMC_VERSION in dalsmc.h */
+bool dcn30_smu_check_msg_header_version(struct clk_mgr_internal *clk_mgr)
+{
+ uint32_t response = 0;
+
+ if (dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetMsgHeaderVersion, 0, &response))
+ if (response == DALSMC_VERSION)
+ return true;
+
+ return false;
+}
+
+void dcn30_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high)
+{
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SetDalDramAddrHigh, addr_high, NULL);
+}
+
+void dcn30_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low)
+{
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SetDalDramAddrLow, addr_low, NULL);
+}
+
+void dcn30_smu_transfer_wm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr)
+{
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_TransferTableSmu2Dram, TABLE_WATERMARKS, NULL);
+}
+
+void dcn30_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
+{
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_TransferTableDram2Smu, TABLE_WATERMARKS, NULL);
+}
+
+/* Returns the actual frequency that was set in MHz, 0 on failure */
+unsigned int dcn30_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, uint16_t freq_mhz)
+{
+ uint32_t response = 0;
+
+ /* bits 23:16 for clock type, lower 16 bits for frequency in MHz */
+ uint32_t param = (clk << 16) | freq_mhz;
+
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SetHardMinByFreq, param, &response);
+
+ return response;
+}
+
+/* Returns the actual frequency that was set in MHz, 0 on failure */
+unsigned int dcn30_smu_set_hard_max_by_freq(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, uint16_t freq_mhz)
+{
+ uint32_t response = 0;
+
+ /* bits 23:16 for clock type, lower 16 bits for frequency in MHz */
+ uint32_t param = (clk << 16) | freq_mhz;
+
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SetHardMaxByFreq, param, &response);
+
+ return response;
+}
+
+/*
+ * Frequency in MHz returned in lower 16 bits for valid DPM level
+ *
+ * Call with dpm_level = 0xFF to query features, return value will be:
+ * Bits 7:0 - number of DPM levels
+ * Bit 28 - 1 = auto DPM on
+ * Bit 29 - 1 = sweep DPM on
+ * Bit 30 - 1 = forced DPM on
+ * Bit 31 - 0 = discrete, 1 = fine-grained
+ *
+ * With fine-grained DPM, only min and max frequencies will be reported
+ *
+ * Returns 0 on failure
+ */
+unsigned int dcn30_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, uint8_t dpm_level)
+{
+ uint32_t response = 0;
+
+ /* bits 23:16 for clock type, lower 8 bits for DPM level */
+ uint32_t param = (clk << 16) | dpm_level;
+
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetDpmFreqByIndex, param, &response);
+
+ return response;
+}
+
+/* Returns the max DPM frequency in DC mode in MHz, 0 on failure */
+unsigned int dcn30_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, PPCLK_e clk)
+{
+ uint32_t response = 0;
+
+ /* bits 23:16 for clock type */
+ uint32_t param = clk << 16;
+
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetDcModeMaxDpmFreq, param, &response);
+
+ return response;
+}
+
+void dcn30_smu_set_min_deep_sleep_dcef_clk(struct clk_mgr_internal *clk_mgr, uint32_t freq_mhz)
+{
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SetMinDeepSleepDcefclk, freq_mhz, NULL);
+}
+
+void dcn30_smu_set_num_of_displays(struct clk_mgr_internal *clk_mgr, uint32_t num_displays)
+{
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_NumOfDisplays, num_displays, NULL);
+}
+
+void dcn30_smu_set_external_client_df_cstate_allow(struct clk_mgr_internal *clk_mgr, bool enable)
+{
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SetExternalClientDfCstateAllow, enable ? 1 : 0, NULL);
+}
+
+void dcn30_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr)
+{
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_BacoAudioD3PME, 0, NULL);
+}
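
Every helper in this file funnels through the same mailbox handshake: wait for any previous response, clear the response register, write the argument, write the message ID (which triggers the SMU), then poll for DALSMC_Result_OK and read the argument register back. A compile-only sketch of that sequence, with stand-in register accessors instead of the driver's REG_READ/REG_WRITE on mmDAL_MSG_REG/mmDAL_ARG_REG/mmDAL_RESP_REG:

#include <stdbool.h>
#include <stdint.h>

#define RESULT_OK 0x1			/* mirrors DALSMC_Result_OK */

enum { MSG_REG, ARG_REG, RESP_REG };

/* Stand-ins for the driver's register accessors; a real backend has to be
 * supplied, so treat this as a compile-only sketch. */
extern uint32_t mbox_read(int reg);
extern void mbox_write(int reg, uint32_t val);

static uint32_t wait_for_response(unsigned int max_retries)
{
	uint32_t resp = 0;

	while (max_retries-- && !(resp = mbox_read(RESP_REG)))
		;	/* the real helper sleeps between polls */

	return resp;
}

/* Same ordering as dcn30_smu_send_msg_with_param(): the output argument is
 * only read back when the SMU acknowledges with RESULT_OK. */
static bool send_msg_with_param(uint32_t msg_id, uint32_t param_in,
				uint32_t *param_out)
{
	wait_for_response(200000);	/* drain any previous response */
	mbox_write(RESP_REG, 0);
	mbox_write(ARG_REG, param_in);
	mbox_write(MSG_REG, msg_id);

	if (wait_for_response(200000) != RESULT_OK)
		return false;

	if (param_out)
		*param_out = mbox_read(ARG_REG);

	return true;
}
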
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.h
new file mode 100644
index 000000000000..236f20ec90d4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DAL_DC_DCN30_CLK_MGR_SMU_MSG_H_
+#define DAL_DC_DCN30_CLK_MGR_SMU_MSG_H_
+
+#include "core_types.h"
+
+#define SMU11_DRIVER_IF_VERSION 0x1F
+
+typedef enum {
+ PPCLK_GFXCLK = 0,
+ PPCLK_SOCCLK,
+ PPCLK_UCLK,
+ PPCLK_FCLK,
+ PPCLK_DCLK_0,
+ PPCLK_VCLK_0,
+ PPCLK_DCLK_1,
+ PPCLK_VCLK_1,
+ PPCLK_DCEFCLK,
+ PPCLK_DISPCLK,
+ PPCLK_PIXCLK,
+ PPCLK_PHYCLK,
+ PPCLK_DTBCLK,
+ PPCLK_COUNT,
+} PPCLK_e;
+
+typedef struct {
+ uint16_t MinClock; // This is either DCEFCLK or SOCCLK (in MHz)
+ uint16_t MaxClock; // This is either DCEFCLK or SOCCLK (in MHz)
+ uint16_t MinUclk;
+ uint16_t MaxUclk;
+
+ uint8_t WmSetting;
+ uint8_t Flags;
+ uint8_t Padding[2];
+
+} WatermarkRowGeneric_t;
+
+#define NUM_WM_RANGES 4
+
+typedef enum {
+ WM_SOCCLK = 0,
+ WM_DCEFCLK,
+ WM_COUNT,
+} WM_CLOCK_e;
+
+typedef enum {
+ WATERMARKS_CLOCK_RANGE = 0,
+ WATERMARKS_DUMMY_PSTATE,
+ WATERMARKS_COUNT,
+} WATERMARKS_FLAGS_e;
+
+typedef struct {
+ // Watermarks
+ WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES];
+} Watermarks_t;
+
+typedef struct {
+ Watermarks_t Watermarks;
+
+ uint32_t MmHubPadding[8]; // SMU internal use
+} WatermarksExternal_t;
+
+#define TABLE_WATERMARKS 1
+
+struct clk_mgr_internal;
+
+bool dcn30_smu_test_message(struct clk_mgr_internal *clk_mgr, uint32_t input);
+bool dcn30_smu_get_smu_version(struct clk_mgr_internal *clk_mgr, unsigned int *version);
+bool dcn30_smu_check_driver_if_version(struct clk_mgr_internal *clk_mgr);
+bool dcn30_smu_check_msg_header_version(struct clk_mgr_internal *clk_mgr);
+void dcn30_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high);
+void dcn30_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low);
+void dcn30_smu_transfer_wm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr);
+void dcn30_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
+unsigned int dcn30_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, uint16_t freq_mhz);
+unsigned int dcn30_smu_set_hard_max_by_freq(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, uint16_t freq_mhz);
+unsigned int dcn30_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, uint8_t dpm_level);
+unsigned int dcn30_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, PPCLK_e clk);
+void dcn30_smu_set_min_deep_sleep_dcef_clk(struct clk_mgr_internal *clk_mgr, uint32_t freq_mhz);
+void dcn30_smu_set_num_of_displays(struct clk_mgr_internal *clk_mgr, uint32_t num_displays);
+void dcn30_smu_set_external_client_df_cstate_allow(struct clk_mgr_internal *clk_mgr, bool enable);
+void dcn30_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr);
+
+#endif /* DAL_DC_DCN30_CLK_MGR_SMU_MSG_H_ */
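
As the comments in dcn30_clk_mgr_smu_msg.c spell out, the per-clock messages pack the PPCLK_e type into bits 23:16 of the parameter with the frequency or DPM level in the low bits, and a GetDpmFreqByIndex query with level 0xFF reports the number of levels in bits 7:0 plus a fine-grained flag in bit 31. A small stand-alone sketch of that packing and decoding (not part of the patch):

#include <stdbool.h>
#include <stdint.h>

/* Parameter layout for SetHardMinByFreq/SetHardMaxByFreq:
 * bits 23:16 = clock type, bits 15:0 = frequency in MHz. */
static uint32_t pack_clk_param(uint8_t clk, uint16_t freq_mhz)
{
	return ((uint32_t)clk << 16) | freq_mhz;
}

/* Decode a GetDpmFreqByIndex(level = 0xFF) query: bit 31 set means
 * fine-grained DPM (only min and max are reported, so two levels),
 * otherwise bits 7:0 give the number of discrete levels. */
static unsigned int dpm_num_levels(uint32_t query_resp, bool *fine_grained)
{
	*fine_grained = (query_resp & (1u << 31)) != 0;

	return *fine_grained ? 2 : (query_resp & 0xFF);
}
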
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 6f93a6ca4cf0..67402d75e67e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -68,6 +68,8 @@
#include "dmub/dmub_srv.h"
+#include "dce/dmub_hw_lock_mgr.h"
+
#define CTX \
dc->ctx
@@ -186,9 +188,10 @@ static bool create_links(
bool should_destory_link = false;
if (link->connector_signal == SIGNAL_TYPE_EDP) {
- if (dc->config.edp_not_connected)
- should_destory_link = true;
- else if (dc->debug.remove_disconnect_edp) {
+ if (dc->config.edp_not_connected) {
+ if (!IS_DIAG_DC(dc->ctx->dce_environment))
+ should_destory_link = true;
+ } else {
enum dc_connection_type type;
dc_link_detect_sink(link, &type);
if (type == dc_connection_none)
@@ -728,6 +731,9 @@ static bool dc_construct(struct dc *dc,
dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
if (!dc->clk_mgr)
goto fail;
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
+#endif
if (dc->res_pool->funcs->update_bw_bounding_box)
dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
@@ -1244,7 +1250,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
int i, k, l;
struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
- disable_dangling_plane(dc, context);
for (i = 0; i < context->stream_count; i++)
dc_streams[i] = context->streams[i];
@@ -1260,6 +1265,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (dc->optimize_seamless_boot_streams == 0)
dc->hwss.prepare_bandwidth(dc, context);
+ disable_dangling_plane(dc, context);
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
*/
@@ -1382,6 +1388,43 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
return (result == DC_OK);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+bool dc_acquire_release_mpc_3dlut(
+ struct dc *dc, bool acquire,
+ struct dc_stream_state *stream,
+ struct dc_3dlut **lut,
+ struct dc_transfer_func **shaper)
+{
+ int pipe_idx;
+ bool ret = false;
+ bool found_pipe_idx = false;
+ const struct resource_pool *pool = dc->res_pool;
+ struct resource_context *res_ctx = &dc->current_state->res_ctx;
+ int mpcc_id = 0;
+
+ if (pool && res_ctx) {
+ if (acquire) {
+ /*find pipe idx for the given stream*/
+ for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
+ if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
+ found_pipe_idx = true;
+ mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
+ break;
+ }
+ }
+ } else
+			found_pipe_idx = true; /* for release, pipe_idx is not required */
+
+ if (found_pipe_idx) {
+ if (acquire && pool->funcs->acquire_post_bldn_3dlut)
+ ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
+ else if (acquire == false && pool->funcs->release_post_bldn_3dlut)
+ ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
+ }
+ }
+ return ret;
+}
+#endif
static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
{
int i;
@@ -2280,9 +2323,22 @@ static void commit_planes_for_stream(struct dc *dc,
}
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
- if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable)
- top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
- top_pipe_to_program->stream_res.tg);
+ if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
+ if (stream && should_use_dmub_lock(stream->link)) {
+ union dmub_hw_lock_flags hw_locks = { 0 };
+ struct dmub_hw_lock_inst_flags inst_flags = { 0 };
+
+ hw_locks.bits.lock_dig = 1;
+ inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
+
+ dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
+ true,
+ &hw_locks,
+ &inst_flags);
+ } else
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
+ top_pipe_to_program->stream_res.tg);
+ }
if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
dc->hwss.interdependent_update_lock(dc, context, true);
@@ -2452,7 +2508,20 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
top_pipe_to_program->stream_res.tg,
CRTC_STATE_VACTIVE);
- top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
+
+ if (stream && should_use_dmub_lock(stream->link)) {
+ union dmub_hw_lock_flags hw_locks = { 0 };
+ struct dmub_hw_lock_inst_flags inst_flags = { 0 };
+
+ hw_locks.bits.lock_dig = 1;
+ inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
+
+ dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
+ false,
+ &hw_locks,
+ &inst_flags);
+ } else
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
top_pipe_to_program->stream_res.tg);
}
@@ -2612,6 +2681,7 @@ void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
dal_irq_service_ack(dc->res_pool->irqs, src);
}
+
void dc_set_power_state(
struct dc *dc,
enum dc_acpi_cm_power_state power_state)
@@ -2623,9 +2693,6 @@ void dc_set_power_state(
case DC_ACPI_CM_POWER_STATE_D0:
dc_resource_state_construct(dc, dc->current_state);
- if (dc->ctx->dmub_srv)
- dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
-
dc->hwss.init_hw(dc);
if (dc->hwss.init_sys_ctx != NULL &&
@@ -2840,3 +2907,51 @@ void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_
if (dc->hwss.get_clock)
dc->hwss.get_clock(dc, clock_type, clock_cfg);
}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+
+void dc_allow_idle_optimizations(struct dc *dc, bool allow)
+{
+ if (dc->debug.disable_idle_power_optimizations)
+ return;
+
+ if (allow == dc->idle_optimizations_allowed)
+ return;
+
+ if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
+ dc->idle_optimizations_allowed = allow;
+}
+
+/*
+ * blank all streams, and set min and max memory clock to
+ * lowest and highest DPM level, respectively
+ */
+void dc_unlock_memory_clock_frequency(struct dc *dc)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_PIPES; i++)
+ if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
+ core_link_disable_stream(&dc->current_state->res_ctx.pipe_ctx[i]);
+
+ dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
+ dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
+}
+
+/*
+ * set min memory clock to the min required for current mode,
+ * max to maxDPM, and unblank streams
+ */
+void dc_lock_memory_clock_frequency(struct dc *dc)
+{
+ unsigned int i;
+
+ dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
+ dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
+ dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
+
+ for (i = 0; i < MAX_PIPES; i++)
+ if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
+ core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
+}
+#endif
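
The two memory-clock helpers added above are intended to bracket work that needs the full UCLK range: unlock blanks the active streams and opens min/max to DPM0 and the highest DPM, lock pulls the minimum back up to what the current mode needs and unblanks. A hypothetical caller, assuming the dc.h declarations are in scope:

/* Hypothetical usage pattern only; the helpers themselves are the ones
 * added to dc.c under CONFIG_DRM_AMD_DC_DCN3_0. */
static void do_work_with_unlocked_memclk(struct dc *dc)
{
	dc_unlock_memory_clock_frequency(dc);	/* blank streams, widen UCLK range */

	/* ... caller-specific work that needs unrestricted memory clock ... */

	dc_lock_memory_clock_frequency(dc);	/* restore mode minimum, unblank streams */
}
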
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 502ed3c7959d..87d89449b9af 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -365,3 +365,62 @@ void context_clock_trace(
context->bw_ctx.bw.dcn.clk.socclk_khz);
#endif
}
+
+/**
+ * dc_status_to_str - convert dc_status to a human readable string
+ * @status: dc_status to be converted
+ *
+ * Return:
+ * A string describing the DC status.
+ */
+char *dc_status_to_str(enum dc_status status)
+{
+ switch (status) {
+ case DC_OK:
+ return "DC OK";
+ case DC_NO_CONTROLLER_RESOURCE:
+ return "No controller resource";
+ case DC_NO_STREAM_ENC_RESOURCE:
+ return "No stream encoder";
+ case DC_NO_CLOCK_SOURCE_RESOURCE:
+ return "No clock source";
+ case DC_FAIL_CONTROLLER_VALIDATE:
+ return "Controller validation failure";
+ case DC_FAIL_ENC_VALIDATE:
+ return "Encoder validation failure";
+ case DC_FAIL_ATTACH_SURFACES:
+ return "Surfaces attachment failure";
+ case DC_FAIL_DETACH_SURFACES:
+ return "Surfaces detachment failure";
+ case DC_FAIL_SURFACE_VALIDATE:
+ return "Surface validation failure";
+ case DC_NO_DP_LINK_BANDWIDTH:
+ return "No DP link bandwidth";
+ case DC_EXCEED_DONGLE_CAP:
+ return "Exceed dongle capability";
+ case DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED:
+ return "Unsupported pixel format";
+ case DC_FAIL_BANDWIDTH_VALIDATE:
+ return "Bandwidth validation failure (BW and Watermark)";
+ case DC_FAIL_SCALING:
+ return "Scaling failure";
+ case DC_FAIL_DP_LINK_TRAINING:
+ return "DP link training failure";
+ case DC_FAIL_DSC_VALIDATE:
+ return "DSC validation failure";
+ case DC_NO_DSC_RESOURCE:
+ return "No DSC resource";
+ case DC_FAIL_UNSUPPORTED_1:
+ return "Unsupported";
+ case DC_FAIL_CLK_EXCEED_MAX:
+ return "Clk exceed max failure";
+ case DC_FAIL_CLK_BELOW_MIN:
+ return "Fail clk below minimum";
+ case DC_FAIL_CLK_BELOW_CFG_REQUIRED:
+ return "Fail clk below required CFG (hard_min in PPLIB)";
+ case DC_ERROR_UNEXPECTED:
+ return "Unexpected error";
+ }
+
+ return "Unexpected status error";
+}
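
A hypothetical call site for the new helper (the logging macro and status variable are stand-ins for whatever the caller has available):

	DC_LOG_WARNING("validation failed: %s\n", dc_status_to_str(status));
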
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 48ab51533d5d..02742cca4d84 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -521,11 +521,11 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin
}
#if defined(CONFIG_DRM_AMD_DC_HDCP)
-bool dc_link_is_hdcp14(struct dc_link *link)
+bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal)
{
bool ret = false;
- switch (link->connector_signal) {
+ switch (signal) {
case SIGNAL_TYPE_DISPLAY_PORT:
case SIGNAL_TYPE_DISPLAY_PORT_MST:
ret = link->hdcp_caps.bcaps.bits.HDCP_CAPABLE;
@@ -545,11 +545,11 @@ bool dc_link_is_hdcp14(struct dc_link *link)
return ret;
}
-bool dc_link_is_hdcp22(struct dc_link *link)
+bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal)
{
bool ret = false;
- switch (link->connector_signal) {
+ switch (signal) {
case SIGNAL_TYPE_DISPLAY_PORT:
case SIGNAL_TYPE_DISPLAY_PORT_MST:
ret = (link->hdcp_caps.bcaps.bits.HDCP_CAPABLE &&
@@ -690,12 +690,8 @@ static bool detect_dp(struct dc_link *link,
if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
-
- dpcd_set_source_specific_data(link);
-
if (!detect_dp_sink_caps(link))
return false;
-
if (is_mst_supported(link)) {
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
link->type = dc_connection_mst_branch;
@@ -858,6 +854,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
bool same_dpcd = true;
enum dc_connection_type new_connection_type = dc_connection_none;
bool perform_dp_seamless_boot = false;
+ const uint32_t post_oui_delay = 30; // 30ms
DC_LOGGER_INIT(link->ctx->logger);
@@ -870,6 +867,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
// need to re-write OUI and brightness in resume case
if (link->connector_signal == SIGNAL_TYPE_EDP) {
dpcd_set_source_specific_data(link);
+ msleep(post_oui_delay);
dc_link_set_default_brightness_aux(link);
//TODO: use cached
}
@@ -925,8 +923,6 @@ static bool dc_link_detect_helper(struct dc_link *link,
case SIGNAL_TYPE_EDP: {
read_current_link_settings_on_detect(link);
- dpcd_set_source_specific_data(link);
-
detect_edp_sink_caps(link);
read_current_link_settings_on_detect(link);
sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
@@ -1636,6 +1632,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
int i;
bool apply_seamless_boot_optimization = false;
uint32_t bl_oled_enable_delay = 50; // in ms
+ const uint32_t post_oui_delay = 30; // 30ms
// check for seamless boot
for (i = 0; i < state->stream_count; i++) {
@@ -1662,6 +1659,8 @@ static enum dc_status enable_link_dp(struct dc_state *state,
// during mode switch we do DP_SET_POWER off then on, and OUI is lost
dpcd_set_source_specific_data(link);
+ if (link->dpcd_sink_ext_caps.raw != 0)
+ msleep(post_oui_delay);
skip_video_pattern = true;
@@ -2560,12 +2559,14 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool
struct dmcu *dmcu = dc->res_pool->dmcu;
struct dmub_psr *psr = dc->res_pool->psr;
+ link->psr_settings.psr_allow_active = allow_active;
+
if (psr != NULL && link->psr_settings.psr_feature_enabled)
psr->funcs->psr_enable(psr, allow_active);
else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_settings.psr_feature_enabled)
dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
-
- link->psr_settings.psr_allow_active = allow_active;
+ else
+ return false;
return true;
}
@@ -3134,6 +3135,11 @@ void core_link_enable_stream(
pipe_ctx->stream->link->link_state_valid = true;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ if (pipe_ctx->stream_res.tg->funcs->set_out_mux)
+ pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO);
+#endif
+
if (dc_is_dvi_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dvi_set_stream_attribute(
pipe_ctx->stream_res.stream_enc,
@@ -3216,6 +3222,15 @@ void core_link_enable_stream(
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
COLOR_DEPTH_UNDEFINED);
+ /* This second call is needed to reconfigure the DIG
+ * as a workaround for the incorrect value being applied
+ * from transmitter control.
+ */
+ if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
+ stream->link->link_enc->funcs->setup(
+ stream->link->link_enc,
+ pipe_ctx->stream->signal);
+
dc->hwss.enable_stream(pipe_ctx);
/* Set DPS PPS SDP (AKA "info frames") */
@@ -3298,9 +3313,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
write_i2c_redriver_setting(pipe_ctx, false);
}
}
- dc->hwss.disable_stream(pipe_ctx);
disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
+
+ dc->hwss.disable_stream(pipe_ctx);
+
if (pipe_ctx->stream->timing.flags.DSC) {
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_set_dsc_enable(pipe_ctx, false);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index aefd29a440b5..b984eecca58b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -503,7 +503,7 @@ bool dal_ddc_service_query_ddc_data(
uint8_t *read_buf,
uint32_t read_size)
{
- bool ret = false;
+ bool success = true;
uint32_t payload_size =
dal_ddc_service_is_in_aux_transaction_mode(ddc) ?
DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE;
@@ -527,7 +527,6 @@ bool dal_ddc_service_query_ddc_data(
* but we want to read 256 over i2c!!!!*/
if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
struct aux_payload payload;
- bool read_available = true;
payload.i2c_over_aux = true;
payload.address = address;
@@ -536,21 +535,26 @@ bool dal_ddc_service_query_ddc_data(
if (write_size != 0) {
payload.write = true;
- payload.mot = false;
+ /* should not set mot (middle of transaction) to 0
+ * if there are pending read payloads
+ */
+ payload.mot = read_size == 0 ? false : true;
payload.length = write_size;
payload.data = write_buf;
- ret = dal_ddc_submit_aux_command(ddc, &payload);
- read_available = ret;
+ success = dal_ddc_submit_aux_command(ddc, &payload);
}
- if (read_size != 0 && read_available) {
+ if (read_size != 0 && success) {
payload.write = false;
+ /* should set mot (middle of transaction) to 0
+ * since it is the last payload to send
+ */
payload.mot = false;
payload.length = read_size;
payload.data = read_buf;
- ret = dal_ddc_submit_aux_command(ddc, &payload);
+ success = dal_ddc_submit_aux_command(ddc, &payload);
}
} else {
struct i2c_command command = {0};
@@ -573,7 +577,7 @@ bool dal_ddc_service_query_ddc_data(
command.number_of_payloads =
dal_ddc_i2c_payloads_get_count(&payloads);
- ret = dm_helpers_submit_i2c(
+ success = dm_helpers_submit_i2c(
ddc->ctx,
ddc->link,
&command);
@@ -581,7 +585,7 @@ bool dal_ddc_service_query_ddc_data(
dal_ddc_i2c_payloads_destroy(&payloads);
}
- return ret;
+ return success;
}
bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
@@ -598,7 +602,7 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
do {
struct aux_payload current_payload;
- bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >
+ bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >=
payload->length;
current_payload.address = payload->address;
@@ -607,7 +611,10 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
current_payload.i2c_over_aux = payload->i2c_over_aux;
current_payload.length = is_end_of_payload ?
payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE;
- current_payload.mot = !is_end_of_payload;
+ /* set mot (middle of transaction) to false
+ * if it is the last payload
+ */
+		current_payload.mot = is_end_of_payload ? payload->mot : true;
current_payload.reply = payload->reply;
current_payload.write = payload->write;
@@ -648,16 +655,17 @@ bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
}
-uint32_t dc_link_aux_configure_timeout(struct ddc_service *ddc,
+bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc,
uint32_t timeout)
{
- uint32_t prev_timeout = 0;
+ bool result = false;
struct ddc *ddc_pin = ddc->ddc_pin;
- if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout)
- prev_timeout =
- ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout);
- return prev_timeout;
+ if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout) {
+ ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout);
+ result = true;
+ }
+ return result;
}
/*test only function*/
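
The dal_ddc_submit_aux_command() change above keeps MOT (middle of transaction) asserted for every 16-byte chunk except the last, and the last chunk now inherits the caller's mot flag so a write with a pending read stays inside one I2C-over-AUX transaction. A stand-alone sketch of that rule (the helper name is ours):

#include <stdbool.h>

#define AUX_MAX_DATA_SIZE 16	/* mirrors DEFAULT_AUX_MAX_DATA_SIZE */

/* mot decision for the chunk that starts at offset 'retrieved' of a payload
 * of total_len bytes: non-final chunks keep mot = true, the final chunk
 * takes the caller's mot so a follow-up read can share the transaction. */
static bool chunk_mot(unsigned int retrieved, unsigned int total_len,
		      bool caller_mot)
{
	bool is_last_chunk = (retrieved + AUX_MAX_DATA_SIZE) >= total_len;

	return is_last_chunk ? caller_mot : true;
}
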
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 91cd884d6f25..3d969f83b3cc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -12,6 +12,8 @@
#include "dc_link_ddc.h"
#include "core_status.h"
#include "dpcd_defs.h"
+#include "dc_dmub_srv.h"
+#include "dce/dmub_hw_lock_mgr.h"
#define DC_LOGGER \
link->ctx->logger
@@ -245,7 +247,7 @@ static uint8_t dc_dp_initialize_scrambling_data_symbols(
static inline bool is_repeater(struct dc_link *link, uint32_t offset)
{
- return (!link->is_lttpr_mode_transparent && offset != 0);
+ return (link->lttpr_non_transparent_mode && offset != 0);
}
static void dpcd_set_lt_pattern_and_lane_settings(
@@ -1038,7 +1040,7 @@ static enum link_training_result perform_clock_recovery_sequence(
/* 3. wait receiver to lock-on*/
wait_time_microsec = lt_settings->cr_pattern_time;
- if (!link->is_lttpr_mode_transparent)
+ if (link->lttpr_non_transparent_mode)
wait_time_microsec = TRAINING_AUX_RD_INTERVAL;
wait_for_training_aux_rd_interval(
@@ -1102,6 +1104,10 @@ static inline enum link_training_result perform_link_training_int(
dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
dpcd_set_training_pattern(link, dpcd_pattern);
+ /* delay 5ms after notifying sink of idle pattern before switching output */
+ if (link->connector_signal != SIGNAL_TYPE_EDP)
+ msleep(5);
+
/* 4. mainlink output idle pattern*/
dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
@@ -1268,7 +1274,7 @@ static void configure_lttpr_mode(struct dc_link *link)
link->dpcd_caps.lttpr_caps.mode = repeater_mode;
}
- if (!link->is_lttpr_mode_transparent) {
+ if (link->lttpr_non_transparent_mode) {
DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__);
@@ -1473,7 +1479,7 @@ enum link_training_result dc_link_dp_perform_link_training(
&lt_settings);
/* Configure lttpr mode */
- if (!link->is_lttpr_mode_transparent)
+ if (link->lttpr_non_transparent_mode)
configure_lttpr_mode(link);
if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
@@ -1489,7 +1495,7 @@ enum link_training_result dc_link_dp_perform_link_training(
dp_set_fec_ready(link, fec_enable);
- if (!link->is_lttpr_mode_transparent) {
+ if (link->lttpr_non_transparent_mode) {
/* 2. perform link training (set link training done
* to false is done as well)
@@ -1551,6 +1557,12 @@ bool perform_link_training_with_retries(
struct dc_link *link = stream->link;
enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
+ /* We need to do this before the link training to ensure the idle pattern in SST
+ * mode will be sent right after the link training
+ */
+ link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
+ pipe_ctx->stream_res.stream_enc->id, true);
+
for (j = 0; j < attempts; ++j) {
dp_enable_link_phy(
@@ -1567,12 +1579,6 @@ bool perform_link_training_with_retries(
dp_set_panel_mode(link, panel_mode);
- /* We need to do this before the link training to ensure the idle pattern in SST
- * mode will be sent right after the link training
- */
- link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
- pipe_ctx->stream_res.stream_enc->id, true);
-
if (link->aux_access_disabled) {
dc_link_dp_perform_link_training_skip_aux(link, link_setting);
return true;
@@ -1756,7 +1762,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
* account for lttpr repeaters cap
* notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3).
*/
- if (!link->is_lttpr_mode_transparent) {
+ if (link->lttpr_non_transparent_mode) {
if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
@@ -1914,7 +1920,7 @@ bool dp_verify_link_cap(
max_link_cap = get_max_link_cap(link);
/* Grant extended timeout request */
- if (!link->is_lttpr_mode_transparent && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) {
+ if (link->lttpr_non_transparent_mode && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) {
uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80;
core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant));
@@ -2849,7 +2855,6 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
enum dc_status result;
bool status = false;
struct pipe_ctx *pipe_ctx;
- struct dc_link_settings previous_link_settings;
int i;
if (out_link_loss)
@@ -2928,32 +2933,25 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
- link->dc->hwss.blank_stream(pipe_ctx);
- }
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
break;
}
if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
return false;
- previous_link_settings = link->cur_link_settings;
-
- perform_link_training_with_retries(&previous_link_settings,
- true, LINK_TRAINING_ATTEMPTS,
- pipe_ctx,
- pipe_ctx->stream->signal);
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
- dc_link_reallocate_mst_payload(link);
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
+ pipe_ctx->stream->link == link)
+ core_link_disable_stream(pipe_ctx);
+ }
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
- link->dc->hwss.unblank_stream(pipe_ctx, &previous_link_settings);
+ if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
+ pipe_ctx->stream->link == link)
+ core_link_enable_stream(link->dc->current_state, pipe_ctx);
}
status = false;
@@ -3255,17 +3253,8 @@ static bool retrieve_link_cap(struct dc_link *link)
uint32_t read_dpcd_retry_cnt = 3;
int i;
struct dp_sink_hw_fw_revision dp_hw_fw_revision;
-
- /* Set default timeout to 3.2ms and read LTTPR capabilities */
- bool ext_timeout_support = link->dc->caps.extended_aux_timeout_support &&
- !link->dc->config.disable_extended_timeout_support;
-
- link->is_lttpr_mode_transparent = true;
-
- if (ext_timeout_support) {
- dc_link_aux_configure_timeout(link->ddc,
- LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD);
- }
+ bool is_lttpr_present = false;
+ const uint32_t post_oui_delay = 30; // 30ms
memset(dpcd_data, '\0', sizeof(dpcd_data));
memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
@@ -3274,6 +3263,13 @@ static bool retrieve_link_cap(struct dc_link *link)
memset(&edp_config_cap, '\0',
sizeof(union edp_configuration_cap));
+ /* if extended timeout is supported in hardware,
+ * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer
+ * CTS 4.2.1.1 regression introduced by CTS specs requirement update.
+ */
+ dc_link_aux_try_to_configure_timeout(link->ddc,
+ LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
+
status = core_link_read_dpcd(link, DP_SET_POWER,
&dpcd_power_state, sizeof(dpcd_power_state));
@@ -3285,6 +3281,12 @@ static bool retrieve_link_cap(struct dc_link *link)
if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3)
udelay(1000);
+ dpcd_set_source_specific_data(link);
+ /* Sink may need to configure internals based on vendor, so allow some
+ * time before proceeding with possibly vendor specific transactions
+ */
+ msleep(post_oui_delay);
+
for (i = 0; i < read_dpcd_retry_cnt; i++) {
status = core_link_read_dpcd(
link,
@@ -3300,8 +3302,14 @@ static bool retrieve_link_cap(struct dc_link *link)
return false;
}
- if (ext_timeout_support) {
-
+ if (link->dc->caps.extended_aux_timeout_support &&
+ link->dc->config.allow_lttpr_non_transparent_mode) {
+ /* By reading the LTTPR capability, the RX assumes that we will enable
+ * LTTPR non transparent mode if an LTTPR is present.
+ * Therefore, only query LTTPR capability when both the LTTPR
+ * extended aux timeout and non transparent mode are supported
+ * by hardware.
+ */
status = core_link_read_dpcd(
link,
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
@@ -3332,20 +3340,21 @@ static bool retrieve_link_cap(struct dc_link *link)
lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
- if (link->dpcd_caps.lttpr_caps.phy_repeater_cnt > 0 &&
+ is_lttpr_present = (link->dpcd_caps.lttpr_caps.phy_repeater_cnt > 0 &&
link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
- link->dpcd_caps.lttpr_caps.revision.raw >= 0x14) {
- link->is_lttpr_mode_transparent = false;
- } else {
- /*No lttpr reset timeout to its default value*/
- link->is_lttpr_mode_transparent = true;
- dc_link_aux_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
- }
-
- CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
+ link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
+ if (is_lttpr_present)
+ CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
}
+ /* decide lttpr non transparent mode */
+ link->lttpr_non_transparent_mode = is_lttpr_present;
+
+ if (!is_lttpr_present)
+ dc_link_aux_try_to_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
+
+
{
union training_aux_rd_interval aux_rd_interval;
@@ -3378,7 +3387,7 @@ static bool retrieve_link_cap(struct dc_link *link)
link->dpcd_caps.dpcd_rev.raw =
dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
- if (link->dpcd_caps.dpcd_rev.raw >= 0x14) {
+ if (link->dpcd_caps.ext_receiver_cap_field_present) {
for (i = 0; i < read_dpcd_retry_cnt; i++) {
status = core_link_read_dpcd(
link,
@@ -4034,9 +4043,23 @@ bool dc_link_dp_set_test_pattern(
break;
}
- if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable)
- pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable(
- pipe_ctx->stream_res.tg);
+ if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) {
+ if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) {
+ union dmub_hw_lock_flags hw_locks = { 0 };
+ struct dmub_hw_lock_inst_flags inst_flags = { 0 };
+
+ hw_locks.bits.lock_dig = 1;
+ inst_flags.dig_inst = pipe_ctx->stream_res.tg->inst;
+
+ dmub_hw_lock_mgr_cmd(link->ctx->dmub_srv,
+ true,
+ &hw_locks,
+ &inst_flags);
+ } else
+ pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable(
+ pipe_ctx->stream_res.tg);
+ }
+
pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
/* update MSA to requested color space */
pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc,
@@ -4063,9 +4086,24 @@ bool dc_link_dp_set_test_pattern(
CRTC_STATE_VBLANK);
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
CRTC_STATE_VACTIVE);
- if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable)
- pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable(
- pipe_ctx->stream_res.tg);
+
+ if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable) {
+ if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) {
+ union dmub_hw_lock_flags hw_locks = { 0 };
+ struct dmub_hw_lock_inst_flags inst_flags = { 0 };
+
+ hw_locks.bits.lock_dig = 1;
+ inst_flags.dig_inst = pipe_ctx->stream_res.tg->inst;
+
+ dmub_hw_lock_mgr_cmd(link->ctx->dmub_srv,
+ false,
+ &hw_locks,
+ &inst_flags);
+ } else
+ pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable(
+ pipe_ctx->stream_res.tg);
+ }
+
/* Set Test Pattern state */
link->test_pattern_enabled = true;
}
@@ -4255,7 +4293,6 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
void dpcd_set_source_specific_data(struct dc_link *link)
{
- const uint32_t post_oui_delay = 30; // 30ms
uint8_t dspc = 0;
enum dc_status ret;
@@ -4296,10 +4333,6 @@ void dpcd_set_source_specific_data(struct dc_link *link)
link->dc->vendor_signature.data.raw,
sizeof(link->dc->vendor_signature.data.raw));
}
-
- // Sink may need to configure internals based on vendor, so allow some
- // time before proceeding with possibly vendor specific transactions
- msleep(post_oui_delay);
}
bool dc_link_set_backlight_level_nits(struct dc_link *link,
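For reference, the presence rule retrieve_link_cap() now uses to set lttpr_non_transparent_mode, restated as a standalone helper. Illustrative only; the helper name is not in the patch, and the fields are the ones read from the LTTPR DPCD block above.

    static bool example_lttpr_present(const struct dc_link *link)
    {
    	/* sane repeater/lane counts and at least the DP 1.4 LTTPR
    	 * field data structure revision (0x14)
    	 */
    	return link->dpcd_caps.lttpr_caps.phy_repeater_cnt > 0 &&
    	       link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
    	       link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
    	       link->dpcd_caps.lttpr_caps.revision.raw >= 0x14;
    }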
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 6590f51caefa..1b3474aa380d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -167,7 +167,8 @@ bool edp_receiver_ready_T9(struct dc_link *link)
} while (++tries < 50);
}
- if (link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off > 0)
+ if (link->local_sink &&
+ link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off > 0)
udelay(link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off * 1000);
return result;
@@ -201,7 +202,8 @@ bool edp_receiver_ready_T7(struct dc_link *link)
} while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms
}
- if (link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
+ if (link->local_sink &&
+ link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000);
return result;
@@ -281,7 +283,7 @@ void dp_set_hw_lane_settings(
{
struct link_encoder *encoder = link->link_enc;
- if (!link->is_lttpr_mode_transparent && !is_immediate_downstream(link, offset))
+ if (link->lttpr_non_transparent_mode && !is_immediate_downstream(link, offset))
return;
/* call Encoder to set lane settings */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 0c5619364e7d..1000dc6daf72 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -52,6 +52,9 @@
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#include "../dcn30/dcn30_resource.h"
+#endif
#define DC_LOGGER_INIT(logger)
@@ -107,6 +110,10 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
case FAMILY_NV:
dc_version = DCN_VERSION_2_0;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ if (ASICREV_IS_SIENNA_CICHLID_P(asic_id.hw_internal_rev))
+ dc_version = DCN_VERSION_3_0;
+#endif
break;
default:
dc_version = DCE_VERSION_UNKNOWN;
@@ -168,6 +175,11 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
res_pool = dcn21_create_resource_pool(init_data, dc);
break;
#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case DCN_VERSION_3_0:
+ res_pool = dcn30_create_resource_pool(init_data, dc);
+ break;
+#endif
default:
break;
@@ -282,6 +294,16 @@ bool resource_construct(
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ for (i = 0; i < caps->num_mpc_3dlut; i++) {
+ pool->mpc_lut[i] = dc_create_3dlut_func();
+ if (pool->mpc_lut[i] == NULL)
+ DC_ERR("DC: failed to create MPC 3dlut!\n");
+ pool->mpc_shaper[i] = dc_create_transfer_func();
+ if (pool->mpc_shaper[i] == NULL)
+ DC_ERR("DC: failed to create MPC shaper!\n");
+ }
+#endif
dc->caps.dynamic_audio = false;
if (pool->audio_count < pool->stream_enc_count) {
dc->caps.dynamic_audio = true;
@@ -377,6 +399,10 @@ bool resource_are_streams_timing_synchronizable(
!= stream2->timing.v_addressable)
return false;
+ if (stream1->timing.v_front_porch
+ != stream2->timing.v_front_porch)
+ return false;
+
if (stream1->timing.pix_clk_100hz
!= stream2->timing.pix_clk_100hz)
return false;
@@ -2049,8 +2075,16 @@ enum dc_status resource_map_pool_resources(
}
/* Add ABM to the resource if on EDP */
- if (pipe_ctx->stream && dc_is_embedded_signal(pipe_ctx->stream->signal))
+ if (pipe_ctx->stream && dc_is_embedded_signal(pipe_ctx->stream->signal)) {
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ if (pool->abm)
+ pipe_ctx->stream_res.abm = pool->abm;
+ else
+ pipe_ctx->stream_res.abm = pool->multiple_abms[pipe_ctx->stream_res.tg->inst];
+#else
pipe_ctx->stream_res.abm = pool->abm;
+#endif
+ }
for (i = 0; i < context->stream_count; i++)
if (context->streams[i] == stream) {
@@ -2867,6 +2901,10 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format)
case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
+#endif
return 32;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
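The embedded-signal hunk earlier in this file picks an ABM block either from the shared pool->abm or, on DCN3.0 pools that expose one ABM per pipe, from multiple_abms indexed by the OTG instance. A sketch of that selection as a helper; the helper name is illustrative and not part of the patch.

    static struct abm *example_pick_abm(const struct resource_pool *pool,
    				    const struct pipe_ctx *pipe_ctx)
    {
    	if (pool->abm)
    		return pool->abm;
    	/* one ABM instance per OTG on DCN3.0 */
    	return pool->multiple_abms[pipe_ctx->stream_res.tg->inst];
    }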
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 4f0e7203dba4..3b897372ed27 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -243,6 +243,9 @@ bool dc_stream_set_cursor_attributes(
struct dc *dc;
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ bool reset_idle_optimizations = false;
+#endif
if (NULL == stream) {
dm_error("DC: dc_stream is NULL!\n");
@@ -262,6 +265,15 @@ bool dc_stream_set_cursor_attributes(
res_ctx = &dc->current_state->res_ctx;
stream->cursor_attributes = *attributes;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ /* disable idle optimizations while updating cursor */
+ if (dc->idle_optimizations_allowed) {
+ dc->hwss.apply_idle_power_optimizations(dc, false);
+ reset_idle_optimizations = true;
+ }
+
+#endif
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
@@ -281,6 +293,12 @@ bool dc_stream_set_cursor_attributes(
if (pipe_to_program)
dc->hwss.cursor_lock(dc, pipe_to_program, false);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ /* re-enable idle optimizations if necessary */
+ if (reset_idle_optimizations)
+ dc->hwss.apply_idle_power_optimizations(dc, true);
+
+#endif
return true;
}
@@ -292,6 +310,9 @@ bool dc_stream_set_cursor_position(
struct dc *dc;
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ bool reset_idle_optimizations = false;
+#endif
if (NULL == stream) {
dm_error("DC: dc_stream is NULL!\n");
@@ -305,6 +326,16 @@ bool dc_stream_set_cursor_position(
dc = stream->ctx->dc;
res_ctx = &dc->current_state->res_ctx;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+
+ /* disable idle optimizations if enabling cursor */
+ if (dc->idle_optimizations_allowed &&
+ !stream->cursor_position.enable && position->enable) {
+ dc->hwss.apply_idle_power_optimizations(dc, false);
+ reset_idle_optimizations = true;
+ }
+
+#endif
stream->cursor_position = *position;
for (i = 0; i < MAX_PIPES; i++) {
@@ -328,6 +359,12 @@ bool dc_stream_set_cursor_position(
if (pipe_to_program)
dc->hwss.cursor_lock(dc, pipe_to_program, false);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ /* re-enable idle optimizations if necessary */
+ if (reset_idle_optimizations)
+ dc->hwss.apply_idle_power_optimizations(dc, true);
+
+#endif
return true;
}
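Both cursor entry points above follow the same DCN3.0 pattern: drop idle power optimizations before touching cursor hardware, then restore them once programming finishes. A condensed sketch of that guard; the helper names are illustrative, the hwss hook is the one used in the patch.

    static bool example_pause_idle_opts(struct dc *dc)
    {
    	if (!dc->idle_optimizations_allowed)
    		return false;
    	dc->hwss.apply_idle_power_optimizations(dc, false);
    	return true;
    }

    static void example_resume_idle_opts(struct dc *dc, bool paused)
    {
    	if (paused)
    		dc->hwss.apply_idle_power_optimizations(dc, true);
    }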
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
index 64cf24a9ab08..f2b39ec35c89 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
@@ -47,9 +47,6 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
*/
memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config));
dc->vm_pa_config.valid = true;
-
- if (pa_config->is_hvm_enabled == 0)
- dc->debug.nv12_iflip_vm_wa = false;
}
return num_vmids;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 85908561c741..f7cb1354a635 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -42,7 +42,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.84"
+#define DC_VER "3.2.91"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -186,6 +186,15 @@ struct dc_dcc_setting {
unsigned int max_compressed_blk_size;
unsigned int max_uncompressed_blk_size;
bool independent_64b_blks;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ // These bitfields are to be used starting with DCN 3.0
+ struct {
+ uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN 3.0 (the worst compression case)
+ uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN 3.0
+ uint32_t dcc_256_128_128 : 1; //available starting with DCN 3.0
+ uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN 3.0 (the best compression case)
+ } dcc_controls;
+#endif
};
struct dc_surface_dcc_cap {
@@ -274,10 +283,13 @@ struct dc_config {
bool edp_not_connected;
bool force_enum_edp;
bool forced_clocks;
- bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well
+ bool allow_lttpr_non_transparent_mode;
bool multi_mon_pp_mclk_switch;
bool disable_dmcu;
bool enable_4to1MPC;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ bool clamp_min_dcfclk;
+#endif
};
enum visual_confirm {
@@ -325,6 +337,7 @@ enum dcn_pwr_state {
struct dc_clocks {
int dispclk_khz;
int dppclk_khz;
+ int disp_dpp_voltage_level_khz;
int dcfclk_khz;
int socclk_khz;
int dcfclk_deep_sleep_khz;
@@ -455,10 +468,16 @@ struct dc_debug_options {
bool skip_detection_link_training;
bool remove_disconnect_edp;
unsigned int force_odm_combine; //bit vector based on otg inst
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ unsigned int force_odm_combine_4to1; //bit vector based on otg inst
+#endif
unsigned int force_fclk_khz;
bool disable_tri_buf;
bool dmub_offload_enabled;
bool dmcub_emulation;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ bool disable_idle_power_optimizations;
+#endif
bool dmub_command_table; /* for testing only */
struct dc_bw_validation_profile bw_val_profile;
bool disable_fec;
@@ -467,11 +486,13 @@ struct dc_debug_options {
* watermarks are not affected.
*/
unsigned int force_min_dcfclk_mhz;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ int dwb_fi_phase;
+#endif
bool disable_timing_sync;
bool cm_in_bypass;
int force_clock_mode;/*every mode change.*/
- bool nv12_iflip_vm_wa;
bool disable_dram_clock_change_vactive_support;
bool validate_dml_output;
bool enable_dmcub_surface_flip;
@@ -573,6 +594,9 @@ struct dc {
/* Require to optimize clocks and bandwidth for added/removed planes */
bool optimized_required;
bool wm_optimized_required;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ bool idle_optimizations_allowed;
+#endif
/* Require to maintain clocks and bandwidth for UEFI enabled HW */
int optimize_seamless_boot_streams;
@@ -629,6 +653,9 @@ struct dc_init_data {
*/
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
struct dpcd_vendor_signature vendor_signature;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ bool force_smu_not_present;
+#endif
};
struct dc_callback_init {
@@ -822,6 +849,9 @@ struct dc_plane_state {
struct dc_transfer_func *in_shaper_func;
struct dc_transfer_func *blend_tf;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ struct dc_transfer_func *gamcor_tf;
+#endif
enum surface_pixel_format format;
enum dc_rotation_angle rotation;
enum plane_stereo_format stereo_format;
@@ -967,6 +997,14 @@ void dc_resource_state_construct(
const struct dc *dc,
struct dc_state *dst_ctx);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+bool dc_acquire_release_mpc_3dlut(
+ struct dc *dc, bool acquire,
+ struct dc_stream_state *stream,
+ struct dc_3dlut **lut,
+ struct dc_transfer_func **shaper);
+#endif
+
void dc_resource_state_copy_construct(
const struct dc_state *src_ctx,
struct dc_state *dst_ctx);
@@ -1086,6 +1124,10 @@ struct hdcp_caps {
#include "dc_link.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane);
+
+#endif
/*******************************************************************************
* Sink Interfaces - A sink corresponds to a display output device
******************************************************************************/
@@ -1199,6 +1241,23 @@ bool dc_is_dmcu_initialized(struct dc *dc);
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+
+void dc_allow_idle_optimizations(struct dc *dc, bool allow);
+
+/*
+ * blank all streams, and set min and max memory clock to
+ * lowest and highest DPM level, respectively
+ */
+void dc_unlock_memory_clock_frequency(struct dc *dc);
+
+/*
+ * set min memory clock to the min required for current mode,
+ * max to maxDPM, and unblank streams
+ */
+void dc_lock_memory_clock_frequency(struct dc *dc);
+
+#endif
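A sketch of how a caller might pair the two new memory clock controls around a mode change. The surrounding sequencing is hypothetical; only the dc_unlock/dc_lock calls are declared by this patch.

    void example_modeset_with_unlocked_mclk(struct dc *dc)
    {
    	/* blank streams and open the full MCLK DPM range */
    	dc_unlock_memory_clock_frequency(dc);

    	/* ... program the new timings/streams here ... */

    	/* pin min MCLK to the new mode's requirement and unblank */
    	dc_lock_memory_clock_frequency(dc);
    }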
/*******************************************************************************
* DSC Interfaces
******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index b1dd0d60d98e..845a3054f21f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -89,7 +89,6 @@ struct dc_vbios_funcs {
bool (*is_device_id_supported)(
struct dc_bios *bios,
struct device_id id);
-
/* COMMANDS */
enum bp_result (*encoder_control)(
@@ -131,6 +130,9 @@ struct dc_vbios_funcs {
enum bp_result (*get_board_layout_info)(
struct dc_bios *dcb,
struct board_layout_info *board_layout_info);
+ uint16_t (*pack_data_tables)(
+ struct dc_bios *dcb,
+ void *dst);
};
struct bios_registers {
@@ -151,6 +153,7 @@ struct dc_bios {
struct integrated_info *integrated_info;
struct dc_firmware_info fw_info;
bool fw_info_valid;
+ struct dc_vram_info vram_info;
};
#endif /* DC_BIOS_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index eea2429ac67d..96532f7ba480 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -106,29 +106,17 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
}
-void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv)
+bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
{
- struct dmub_srv *dmub = dc_dmub_srv->dmub;
- struct dc_context *dc_ctx = dc_dmub_srv->ctx;
- enum dmub_status status;
+ struct dmub_srv *dmub;
+ union dmub_fw_boot_status status;
- for (;;) {
- /* Wait up to a second for PHY init. */
- status = dmub_srv_wait_for_phy_init(dmub, 1000000);
- if (status == DMUB_STATUS_OK)
- /* Initialization OK */
- break;
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return false;
- DC_ERROR("DMCUB PHY init failed: status=%d\n", status);
- ASSERT(0);
+ dmub = dc_dmub_srv->dmub;
- if (status != DMUB_STATUS_TIMEOUT)
- /*
- * Server likely initialized or we don't have
- * DMCUB HW support - this won't end.
- */
- break;
+ status = dmub->hw_funcs.get_fw_status(dmub);
- /* Continue spinning so we don't hang the ASIC. */
- }
+ return status.bits.optimized_init_done;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index a3a09ccb6d26..8bd20d0d7689 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -56,4 +56,6 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv);
+bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv);
+
#endif /* _DMUB_DC_SRV_H_ */
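dc_dmub_srv_optimized_init_done() replaces the blocking wait-for-PHY-init loop removed from dc_dmub_srv.c. A hedged sketch of a caller skipping redundant hardware init when firmware reports an optimized boot; the init path itself is hypothetical.

    void example_hw_init(struct dc *dc)
    {
    	if (!dc_dmub_srv_optimized_init_done(dc->ctx->dmub_srv)) {
    		/* firmware did not pre-initialize the hardware;
    		 * run the full init sequence here
    		 */
    	}
    }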
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index a8dc3082e3e1..b7a8c71e3e39 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -62,6 +62,9 @@ enum dc_plane_addr_type {
PLN_ADDR_TYPE_GRAPHICS = 0,
PLN_ADDR_TYPE_GRPH_STEREO,
PLN_ADDR_TYPE_VIDEO_PROGRESSIVE,
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ PLN_ADDR_TYPE_RGBEA
+#endif
};
struct dc_plane_address {
@@ -84,6 +87,16 @@ struct dc_plane_address {
PHYSICAL_ADDRESS_LOC right_meta_addr;
union large_integer right_dcc_const_color;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ PHYSICAL_ADDRESS_LOC left_alpha_addr;
+ PHYSICAL_ADDRESS_LOC left_alpha_meta_addr;
+ union large_integer left_alpha_dcc_const_color;
+
+ PHYSICAL_ADDRESS_LOC right_alpha_addr;
+ PHYSICAL_ADDRESS_LOC right_alpha_meta_addr;
+ union large_integer right_alpha_dcc_const_color;
+#endif
+
} grph_stereo;
/*video progressive*/
@@ -96,6 +109,18 @@ struct dc_plane_address {
PHYSICAL_ADDRESS_LOC chroma_meta_addr;
union large_integer chroma_dcc_const_color;
} video_progressive;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ struct {
+ PHYSICAL_ADDRESS_LOC addr;
+ PHYSICAL_ADDRESS_LOC meta_addr;
+ union large_integer dcc_const_color;
+
+ PHYSICAL_ADDRESS_LOC alpha_addr;
+ PHYSICAL_ADDRESS_LOC alpha_meta_addr;
+ union large_integer alpha_dcc_const_color;
+ } rgbea;
+#endif
};
union large_integer page_table_base;
@@ -131,9 +156,15 @@ struct dc_plane_dcc_param {
int meta_pitch;
bool independent_64b_blks;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ uint8_t dcc_ind_blk;
+#endif
int meta_pitch_c;
bool independent_64b_blks_c;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ uint8_t dcc_ind_blk_c;
+#endif
};
/*Displayable pixel format in fb*/
@@ -169,6 +200,10 @@ enum surface_pixel_format {
SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX,
SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT,
SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT,
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ SURFACE_PIXEL_FORMAT_GRPH_RGBE,
+ SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA,
+#endif
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
@@ -355,6 +390,7 @@ union dc_tiling_info {
bool meta_linear;
bool rb_aligned;
bool pipe_aligned;
+ unsigned int num_pkrs;
} gfx9;
};
@@ -813,6 +849,42 @@ enum dwb_stereo_type {
DWB_STEREO_TYPE_FRAME_SEQUENTIAL = 3, /* Frame sequential */
};
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+
+enum dwb_out_format {
+ DWB_OUT_FORMAT_32BPP_ARGB = 0,
+ DWB_OUT_FORMAT_32BPP_RGBA = 1,
+ DWB_OUT_FORMAT_64BPP_ARGB = 2,
+ DWB_OUT_FORMAT_64BPP_RGBA = 3
+};
+
+enum dwb_out_denorm {
+ DWB_OUT_DENORM_10BPC = 0,
+ DWB_OUT_DENORM_8BPC = 1,
+ DWB_OUT_DENORM_BYPASS = 2
+};
+
+enum cm_gamut_remap_select {
+ CM_GAMUT_REMAP_MODE_BYPASS = 0,
+ CM_GAMUT_REMAP_MODE_RAMA_COEFF,
+ CM_GAMUT_REMAP_MODE_RAMB_COEFF,
+ CM_GAMUT_REMAP_MODE_RESERVED
+};
+
+enum cm_gamut_coef_format {
+ CM_GAMUT_REMAP_COEF_FORMAT_S2_13 = 0,
+ CM_GAMUT_REMAP_COEF_FORMAT_S3_12 = 1
+};
+
+struct mcif_warmup_params {
+ union large_integer start_address;
+ unsigned int address_increment;
+ unsigned int region_size;
+ unsigned int p_vmid;
+};
+
+#endif
+
#define MCIF_BUF_COUNT 4
struct mcif_buf_params {
@@ -822,6 +894,9 @@ struct mcif_buf_params {
unsigned int chroma_pitch;
unsigned int warmup_pitch;
unsigned int swlock;
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ unsigned int p_vmid;
+#endif
};
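A sketch of populating the new RGBEA address variant added to dc_plane_address earlier in this file. The address values are placeholders, and it assumes PHYSICAL_ADDRESS_LOC exposes quad_part as it does elsewhere in dc.

    struct dc_plane_address addr = { 0 };

    addr.type = PLN_ADDR_TYPE_RGBEA;
    addr.rgbea.addr.quad_part = color_fb_gpu_addr;        /* RGBE color plane */
    addr.rgbea.alpha_addr.quad_part = alpha_fb_gpu_addr;  /* separate alpha plane */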
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index f63fc25aa6c5..e002ef706e1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -100,7 +100,7 @@ struct dc_link {
bool link_state_valid;
bool aux_access_disabled;
bool sync_lt_in_progress;
- bool is_lttpr_mode_transparent;
+ bool lttpr_non_transparent_mode;
/* caps is the same as reported_link_cap. link_traing use
* reported_link_cap. Will clean up. TODO
@@ -311,8 +311,8 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
*/
#ifdef CONFIG_DRM_AMD_DC_HDCP
-bool dc_link_is_hdcp14(struct dc_link *link);
-bool dc_link_is_hdcp22(struct dc_link *link);
+bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal);
+bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal);
#endif
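Call sites of the two HDCP queries gain a signal argument, which matters on MST links where different streams on the same link may need different checks. Illustrative usage under the new signature (CONFIG_DRM_AMD_DC_HDCP builds only):

    if (dc_link_is_hdcp22(link, stream->signal)) {
    	/* prefer HDCP 2.2 content protection */
    } else if (dc_link_is_hdcp14(link, stream->signal)) {
    	/* fall back to HDCP 1.4 */
    }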
void dc_link_set_drive_settings(struct dc *dc,
struct link_training_settings *lt_settings,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 49aad691e687..f2ed9bc5a319 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -87,6 +87,13 @@ struct dc_writeback_info {
int dwb_pipe_inst;
struct dc_dwb_params dwb_params;
struct mcif_buf_params mcif_buf_params;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ struct mcif_warmup_params mcif_warmup_params;
+ /* the plane that is the input to TOP_MUX for MPCC that is the DWB source */
+ struct dc_plane_state *writeback_source_plane;
+ /* source MPCC instance, for use internally by dc */
+ int mpcc_inst;
+#endif
};
struct dc_writeback_update {
@@ -200,6 +207,10 @@ struct dc_stream_state {
/* writeback */
unsigned int num_wb_info;
struct dc_writeback_info writeback_info[MAX_DWB_PIPES];
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ const struct dc_transfer_func *func_shaper;
+ const struct dc_3dlut *lut3d_func;
+#endif
/* Computed state bits */
bool mode_changed : 1;
@@ -251,6 +262,10 @@ struct dc_stream_update {
struct dc_writeback_update *wb_update;
struct dc_dsc_config *dsc_config;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ struct dc_transfer_func *func_shaper;
+ struct dc_3dlut *lut3d_func;
+#endif
};
bool dc_is_stream_unchanged(
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index f236da1c1859..d64241433548 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -255,6 +255,8 @@ struct dc_edid_caps {
uint8_t qs_bit;
uint8_t qy_bit;
+ uint32_t max_tmds_clk_mhz;
+
/*HDMI 2.0 caps*/
bool lte_340mcsc_scramble;
@@ -475,6 +477,19 @@ enum display_content_type {
DISPLAY_CONTENT_TYPE_GAME = 8
};
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+enum cm_gamut_adjust_type {
+ CM_GAMUT_ADJUST_TYPE_BYPASS = 0,
+ CM_GAMUT_ADJUST_TYPE_HW, /* without adjustments */
+ CM_GAMUT_ADJUST_TYPE_SW /* use adjustments */
+};
+
+struct cm_grph_csc_adjustment {
+ struct fixed31_32 temperature_matrix[12];
+ enum cm_gamut_adjust_type gamut_adjust_type;
+ enum cm_gamut_coef_format gamut_coef_format;
+};
+#endif
/* writeback */
struct dwb_stereo_params {
bool stereo_enabled; /* false: normal mode, true: 3D stereo */
@@ -492,9 +507,21 @@ struct dc_dwb_cnv_params {
unsigned int crop_x; /* cropped window start x value at cnv output */
unsigned int crop_y; /* cropped window start y value at cnv output */
enum dwb_cnv_out_bpc cnv_out_bpc; /* cnv output pixel depth - 8bpc or 10bpc */
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ enum dwb_out_format fc_out_format; /* dwb output pixel format - 2101010 or 16161616 and ARGB or RGBA */
+ enum dwb_out_denorm out_denorm_mode;/* dwb output denormalization mode */
+ unsigned int out_max_pix_val;/* pixel values greater than out_max_pix_val are clamped to out_max_pix_val */
+ unsigned int out_min_pix_val;/* pixel values less than out_min_pix_val are clamped to out_min_pix_val */
+#endif
};
struct dc_dwb_params {
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ unsigned int dwbscl_black_color; /* must be in FP1.5.10 */
+ unsigned int hdr_mult; /* must be in FP1.6.12 */
+ struct cm_grph_csc_adjustment csc_params;
+ struct dwb_stereo_params stereo_params;
+#endif
struct dc_dwb_cnv_params cnv_params; /* CNV source size and cropping window parameters */
unsigned int dest_width; /* Destination width */
unsigned int dest_height; /* Destination height */
@@ -862,6 +889,15 @@ struct dsc_dec_dpcd_caps {
uint32_t branch_max_line_width;
};
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+enum dc_gpu_mem_alloc_type {
+ DC_MEM_ALLOC_TYPE_GART,
+ DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ DC_MEM_ALLOC_TYPE_INVISIBLE_FRAME_BUFFER,
+ DC_MEM_ALLOC_TYPE_AGP
+};
+
+#endif
enum dc_psr_version {
DC_PSR_VERSION_1 = 0,
DC_PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index f704a8fd52e8..973be8f9fd10 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -29,7 +29,8 @@
DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
-dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o dmub_abm.o dce_panel_cntl.o
+dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o dmub_abm.o dce_panel_cntl.o \
+dmub_hw_lock_mgr.o
AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
index 9718a4823372..a44effcda49f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
@@ -76,6 +76,22 @@
SR(DC_ABM1_HGLS_REG_READ_PROGRESS), \
NBIO_SR(BIOS_SCRATCH_2)
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define ABM_DCN301_REG_LIST(id)\
+ ABM_COMMON_REG_LIST_DCE_BASE(), \
+ SRI(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \
+ SRI(DC_ABM1_LS_SAMPLE_RATE, ABM, id), \
+ SRI(BL1_PWM_BL_UPDATE_SAMPLE_RATE, ABM, id), \
+ SRI(DC_ABM1_HG_MISC_CTRL, ABM, id), \
+ SRI(DC_ABM1_IPCSC_COEFF_SEL, ABM, id), \
+ SRI(BL1_PWM_CURRENT_ABM_LEVEL, ABM, id), \
+ SRI(BL1_PWM_TARGET_ABM_LEVEL, ABM, id), \
+ SRI(BL1_PWM_USER_LEVEL, ABM, id), \
+ SRI(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \
+ SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \
+ NBIO_SR(BIOS_SCRATCH_2)
+#endif
+
#define ABM_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -149,6 +165,10 @@
#define ABM_MASK_SH_LIST_DCN20(mask_sh) ABM_MASK_SH_LIST_DCE110(mask_sh)
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define ABM_MASK_SH_LIST_DCN301(mask_sh) ABM_MASK_SH_LIST_DCN10(mask_sh)
+#endif
+
#define ABM_REG_FIELD_LIST(type) \
type ABM1_HG_NUM_OF_BINS_SEL; \
type ABM1_HG_VMAX_SEL; \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 5a35495bc11d..408046579712 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -140,6 +140,8 @@ static void check_audio_bandwidth_hdmi(
bool limit_freq_to_88_2_khz = false;
bool limit_freq_to_96_khz = false;
bool limit_freq_to_174_4_khz = false;
+ if (!crtc_info)
+ return;
/* For two channels supported return whatever sink support,unmodified*/
if (channel_count > 2) {
@@ -784,7 +786,7 @@ void dce_aud_wall_dto_setup(
struct azalia_clock_info clock_info = { 0 };
- if (dc_is_hdmi_signal(signal)) {
+ if (dc_is_hdmi_tmds_signal(signal)) {
uint32_t src_sel;
/*DTO0 Programming goal:
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index d2ad0504b0de..9cc65dc1970f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -1004,16 +1004,58 @@ static bool get_pixel_clk_frequency_100hz(
return false;
}
-
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
/* this table is used to find *1.001 and /1.001 pixel rates from non-precise pixel rate */
-struct pixel_rate_range_table_entry {
- unsigned int range_min_khz;
- unsigned int range_max_khz;
- unsigned int target_pixel_rate_khz;
- unsigned short mult_factor;
- unsigned short div_factor;
+const struct pixel_rate_range_table_entry video_optimized_pixel_rates[] = {
+ // /1.001 rates
+ {25170, 25180, 25200, 1000, 1001}, //25.2MHz -> 25.17
+ {59340, 59350, 59400, 1000, 1001}, //59.4Mhz -> 59.340
+ {74170, 74180, 74250, 1000, 1001}, //74.25Mhz -> 74.1758
+ {125870, 125880, 126000, 1000, 1001}, //126Mhz -> 125.87
+ {148350, 148360, 148500, 1000, 1001}, //148.5Mhz -> 148.3516
+ {167830, 167840, 168000, 1000, 1001}, //168Mhz -> 167.83
+ {222520, 222530, 222750, 1000, 1001}, //222.75Mhz -> 222.527
+ {257140, 257150, 257400, 1000, 1001}, //257.4Mhz -> 257.1429
+ {296700, 296710, 297000, 1000, 1001}, //297Mhz -> 296.7033
+ {342850, 342860, 343200, 1000, 1001}, //343.2Mhz -> 342.857
+ {395600, 395610, 396000, 1000, 1001}, //396Mhz -> 395.6
+ {409090, 409100, 409500, 1000, 1001}, //409.5Mhz -> 409.091
+ {445050, 445060, 445500, 1000, 1001}, //445.5Mhz -> 445.055
+ {467530, 467540, 468000, 1000, 1001}, //468Mhz -> 467.5325
+ {519230, 519240, 519750, 1000, 1001}, //519.75Mhz -> 519.231
+ {525970, 525980, 526500, 1000, 1001}, //526.5Mhz -> 525.974
+ {545450, 545460, 546000, 1000, 1001}, //546Mhz -> 545.455
+ {593400, 593410, 594000, 1000, 1001}, //594Mhz -> 593.4066
+ {623370, 623380, 624000, 1000, 1001}, //624Mhz -> 623.377
+ {692300, 692310, 693000, 1000, 1001}, //693Mhz -> 692.308
+ {701290, 701300, 702000, 1000, 1001}, //702Mhz -> 701.2987
+ {791200, 791210, 792000, 1000, 1001}, //792Mhz -> 791.209
+ {890100, 890110, 891000, 1000, 1001}, //891Mhz -> 890.1099
+ {1186810, 1186820, 1188000, 1000, 1001},//1188Mhz -> 1186.8131
+
+ // *1.001 rates
+ {27020, 27030, 27000, 1001, 1000}, //27Mhz
+ {54050, 54060, 54000, 1001, 1000}, //54Mhz
+ {108100, 108110, 108000, 1001, 1000},//108Mhz
};
+const struct pixel_rate_range_table_entry *look_up_in_video_optimized_rate_tlb(
+ unsigned int pixel_rate_khz)
+{
+ int i;
+
+ for (i = 0; i < NUM_ELEMENTS(video_optimized_pixel_rates); i++) {
+ const struct pixel_rate_range_table_entry *e = &video_optimized_pixel_rates[i];
+
+ if (e->range_min_khz <= pixel_rate_khz && pixel_rate_khz <= e->range_max_khz) {
+ return e;
+ }
+ }
+
+ return NULL;
+}
+#endif
+
static bool dcn20_program_pix_clk(
struct clock_source *clock_source,
struct pixel_clk_params *pix_clk_params,
@@ -1031,6 +1073,85 @@ static const struct clock_source_funcs dcn20_clk_src_funcs = {
.get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz
};
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+static bool dcn3_program_pix_clk(
+ struct clock_source *clock_source,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
+ unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
+ unsigned int dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz;
+ const struct pixel_rate_range_table_entry *e =
+ look_up_in_video_optimized_rate_tlb(pix_clk_params->requested_pix_clk_100hz / 10);
+
+ // For these signal types the driver programs DP_DTO directly, without calling the VBIOS command table
+ if (dc_is_dp_signal(pix_clk_params->signal_type)) {
+ if (e) {
+ /* Set DTO values: phase = target clock, modulo = reference clock*/
+ REG_WRITE(PHASE[inst], e->target_pixel_rate_khz * e->mult_factor);
+ REG_WRITE(MODULO[inst], dp_dto_ref_khz * e->div_factor);
+ } else {
+ /* Set DTO values: phase = target clock, modulo = reference clock*/
+ REG_WRITE(PHASE[inst], pll_settings->actual_pix_clk_100hz * 100);
+ REG_WRITE(MODULO[inst], dp_dto_ref_khz * 1000);
+ }
+ REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1);
+ } else
+ // For other signal types (HDMI_TYPE_A, DVI) the driver still calls the VBIOS command table
+ dce112_program_pix_clk(clock_source, pix_clk_params, pll_settings);
+
+ return true;
+}
+
+static uint32_t dcn3_get_pix_clk_dividers(
+ struct clock_source *cs,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ unsigned long long actual_pix_clk_100Hz = pix_clk_params->requested_pix_clk_100hz;
+ struct dce110_clk_src *clk_src;
+
+ clk_src = TO_DCE110_CLK_SRC(cs);
+ DC_LOGGER_INIT();
+
+ if (pix_clk_params == NULL || pll_settings == NULL
+ || pix_clk_params->requested_pix_clk_100hz == 0) {
+ DC_LOG_ERROR(
+ "%s: Invalid parameters!!\n", __func__);
+ return -1;
+ }
+
+ memset(pll_settings, 0, sizeof(*pll_settings));
+ /* Adjust for HDMI Type A deep color */
+ if (pix_clk_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A) {
+ switch (pix_clk_params->color_depth) {
+ case COLOR_DEPTH_101010:
+ actual_pix_clk_100Hz = (actual_pix_clk_100Hz * 5) >> 2;
+ break;
+ case COLOR_DEPTH_121212:
+ actual_pix_clk_100Hz = (actual_pix_clk_100Hz * 6) >> 2;
+ break;
+ case COLOR_DEPTH_161616:
+ actual_pix_clk_100Hz = actual_pix_clk_100Hz * 2;
+ break;
+ default:
+ break;
+ }
+ }
+ pll_settings->actual_pix_clk_100hz = (unsigned int) actual_pix_clk_100Hz;
+ pll_settings->adjusted_pix_clk_100hz = (unsigned int) actual_pix_clk_100Hz;
+ pll_settings->calculated_pix_clk_100hz = (unsigned int) actual_pix_clk_100Hz;
+
+ return 0;
+}
+
+static const struct clock_source_funcs dcn3_clk_src_funcs = {
+ .cs_power_down = dce110_clock_source_power_down,
+ .program_pix_clk = dcn3_program_pix_clk,
+ .get_pix_clk_dividers = dcn3_get_pix_clk_dividers
+};
+#endif
/*****************************************/
/* Constructor */
/*****************************************/
@@ -1415,3 +1536,21 @@ bool dcn20_clk_src_construct(
return ret;
}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+bool dcn3_clk_src_construct(
+ struct dce110_clk_src *clk_src,
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ const struct dce110_clk_src_shift *cs_shift,
+ const struct dce110_clk_src_mask *cs_mask)
+{
+ bool ret = dce112_clk_src_construct(clk_src, ctx, bios, id, regs, cs_shift, cs_mask);
+
+ clk_src->base.funcs = &dcn3_clk_src_funcs;
+
+ return ret;
+}
+#endif
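A worked example of the table-driven DP DTO programming above, assuming a 100 MHz dprefclk: a request of 148351.6 kHz (148.5 MHz / 1.001) lands in the {148350, 148360, 148500, 1000, 1001} row, so the DTO is programmed with an exact ratio rather than the rounded request.

    const struct pixel_rate_range_table_entry *e =
    	look_up_in_video_optimized_rate_tlb(148352);  /* in range, so e != NULL */
    uint32_t dp_dto_ref_khz = 100000;                     /* assumed dprefclk */
    uint32_t phase  = e->target_pixel_rate_khz * e->mult_factor; /* 148500000 */
    uint32_t modulo = dp_dto_ref_khz * e->div_factor;            /* 100100000 */
    /* output pixel clock = dprefclk * phase / modulo = 148500 / 1.001 kHz */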
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
index 51bd25079606..69b904ab8151 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
@@ -91,6 +91,23 @@
SRII(PIXEL_RATE_CNTL, OTG, 2),\
SRII(PIXEL_RATE_CNTL, OTG, 3)
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define CS_COMMON_REG_LIST_DCN3_0(index, pllid) \
+ SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
+ SRII(PHASE, DP_DTO, 0),\
+ SRII(PHASE, DP_DTO, 1),\
+ SRII(PHASE, DP_DTO, 2),\
+ SRII(PHASE, DP_DTO, 3),\
+ SRII(MODULO, DP_DTO, 0),\
+ SRII(MODULO, DP_DTO, 1),\
+ SRII(MODULO, DP_DTO, 2),\
+ SRII(MODULO, DP_DTO, 3),\
+ SRII(PIXEL_RATE_CNTL, OTG, 0),\
+ SRII(PIXEL_RATE_CNTL, OTG, 1),\
+ SRII(PIXEL_RATE_CNTL, OTG, 2),\
+ SRII(PIXEL_RATE_CNTL, OTG, 3)
+#endif
+
#define CS_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\
CS_SF(DP_DTO0_PHASE, DP_DTO0_PHASE, mask_sh),\
CS_SF(DP_DTO0_MODULO, DP_DTO0_MODULO, mask_sh),\
@@ -204,4 +221,29 @@ bool dcn20_clk_src_construct(
const struct dce110_clk_src_shift *cs_shift,
const struct dce110_clk_src_mask *cs_mask);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+bool dcn3_clk_src_construct(
+ struct dce110_clk_src *clk_src,
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ const struct dce110_clk_src_shift *cs_shift,
+ const struct dce110_clk_src_mask *cs_mask);
+#endif
+
+/* this table is used to find *1.001 and /1.001 pixel rates from non-precise pixel rate */
+struct pixel_rate_range_table_entry {
+ unsigned int range_min_khz;
+ unsigned int range_max_khz;
+ unsigned int target_pixel_rate_khz;
+ unsigned short mult_factor;
+ unsigned short div_factor;
+};
+
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+extern const struct pixel_rate_range_table_entry video_optimized_pixel_rates[];
+const struct pixel_rate_range_table_entry *look_up_in_video_optimized_rate_tlb(
+ unsigned int pixel_rate_khz);
+#endif
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 5479d959ec62..66b88d6ba398 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -78,6 +78,24 @@
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4), \
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 5)
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define HWSEQ_PIXEL_RATE_REG_LIST_3(blk) \
+ SRII(PIXEL_RATE_CNTL, blk, 0), \
+ SRII(PIXEL_RATE_CNTL, blk, 1),\
+ SRII(PIXEL_RATE_CNTL, blk, 2),\
+ SRII(PIXEL_RATE_CNTL, blk, 3), \
+ SRII(PIXEL_RATE_CNTL, blk, 4), \
+ SRII(PIXEL_RATE_CNTL, blk, 5)
+
+#define HWSEQ_PHYPLL_REG_LIST_3(blk) \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1),\
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 2),\
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 3), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 5)
+#endif
+
#define HWSEQ_DCE11_REG_LIST_BASE() \
SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
SR(DCFEV_CLOCK_CONTROL), \
@@ -200,6 +218,28 @@
SR(VGA_TEST_CONTROL), \
SR(DC_IP_REQUEST_CNTL)
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define HWSEQ_DCN30_REG_LIST()\
+ HWSEQ_DCN2_REG_LIST(),\
+ HWSEQ_DCN_REG_LIST(), \
+ HWSEQ_PIXEL_RATE_REG_LIST_3(OTG), \
+ HWSEQ_PHYPLL_REG_LIST_3(OTG), \
+ SR(MICROSECOND_TIME_BASE_DIV), \
+ SR(MILLISECOND_TIME_BASE_DIV), \
+ SR(DISPCLK_FREQ_CHANGE_CNTL), \
+ SR(RBBMIF_TIMEOUT_DIS), \
+ SR(RBBMIF_TIMEOUT_DIS_2), \
+ SR(DCHUBBUB_CRC_CTRL), \
+ SR(DPP_TOP0_DPP_CRC_CTRL), \
+ SR(DPP_TOP0_DPP_CRC_VAL_B_A), \
+ SR(DPP_TOP0_DPP_CRC_VAL_R_G), \
+ SR(MPC_CRC_CTRL), \
+ SR(MPC_CRC_RESULT_GB), \
+ SR(MPC_CRC_RESULT_C), \
+ SR(MPC_CRC_RESULT_AR), \
+ SR(AZALIA_AUDIO_DTO), \
+ SR(AZALIA_CONTROLLER_CLOCK_GATING)
+#endif
#define HWSEQ_DCN2_REG_LIST()\
HWSEQ_DCN_REG_LIST(), \
HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 0), \
@@ -546,6 +586,12 @@ struct dce_hwseq_registers {
HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh)
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define HWSEQ_DCN30_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCN2_MASK_SH_LIST(mask_sh), \
+ HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh)
+#endif
+
#define HWSEQ_DCN2_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
index ebff9b1e312e..43781e77be43 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
@@ -95,7 +95,7 @@ static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_panel_cntl *d
return (uint32_t)(current_backlight);
}
-uint32_t dce_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
+static uint32_t dce_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
{
struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
uint32_t value;
@@ -155,7 +155,7 @@ uint32_t dce_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
return current_backlight;
}
-bool dce_is_panel_backlight_on(struct panel_cntl *panel_cntl)
+static bool dce_is_panel_backlight_on(struct panel_cntl *panel_cntl)
{
struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
uint32_t value;
@@ -165,7 +165,7 @@ bool dce_is_panel_backlight_on(struct panel_cntl *panel_cntl)
return value;
}
-bool dce_is_panel_powered_on(struct panel_cntl *panel_cntl)
+static bool dce_is_panel_powered_on(struct panel_cntl *panel_cntl)
{
struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
@@ -177,7 +177,7 @@ bool dce_is_panel_powered_on(struct panel_cntl *panel_cntl)
return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1);
}
-void dce_store_backlight_level(struct panel_cntl *panel_cntl)
+static void dce_store_backlight_level(struct panel_cntl *panel_cntl)
{
struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
@@ -192,7 +192,7 @@ void dce_store_backlight_level(struct panel_cntl *panel_cntl)
&panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
}
-void dce_driver_set_backlight(struct panel_cntl *panel_cntl,
+static void dce_driver_set_backlight(struct panel_cntl *panel_cntl,
uint32_t backlight_pwm_u16_16)
{
uint32_t backlight_16bit;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
index da0b29abfbda..0cf130dc4e52 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -50,71 +50,7 @@
#define DISABLE_ABM_IMMEDIATELY 255
-static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t panel_inst)
-{
- union dmub_rb_cmd cmd;
- struct dc_context *dc = abm->ctx;
- uint32_t ramping_boundary = 0xFFFF;
-
- cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
- cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE;
- cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst;
- cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst;
- cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
- cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data);
-
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
-
- return true;
-}
-
-static void dmcub_set_backlight_level(
- struct dce_abm *dce_abm,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp,
- uint32_t otg_inst,
- uint32_t panel_inst)
-{
- union dmub_rb_cmd cmd;
- struct dc_context *dc = dce_abm->base.ctx;
- unsigned int backlight_8_bit = 0;
- uint32_t s2;
-
- if (backlight_pwm_u16_16 & 0x10000)
- // Check for max backlight condition
- backlight_8_bit = 0xFF;
- else
- // Take MSB of fractional part since backlight is not max
- backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF;
-
- dmub_abm_set_pipe(&dce_abm->base, otg_inst, panel_inst);
-
- REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_pwm_u16_16);
-
- if (otg_inst == 0)
- frame_ramp = 0;
-
- cmd.abm_set_backlight.header.type = DMUB_CMD__ABM;
- cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
- cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp;
- cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
-
- // Update requested backlight level
- s2 = REG_READ(BIOS_SCRATCH_2);
-
- s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
- backlight_8_bit &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >>
- ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
- s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
-
- REG_WRITE(BIOS_SCRATCH_2, s2);
-}
static void dmub_abm_enable_fractional_pwm(struct dc_context *dc)
{
@@ -211,31 +147,6 @@ static bool dmub_abm_set_level(struct abm *abm, uint32_t level)
return true;
}
-static bool dmub_abm_immediate_disable(struct abm *abm, uint32_t panel_inst)
-{
- dmub_abm_set_pipe(abm, DISABLE_ABM_IMMEDIATELY, panel_inst);
-
- return true;
-}
-
-static bool dmub_abm_set_backlight_level_pwm(
- struct abm *abm,
- unsigned int backlight_pwm_u16_16,
- unsigned int frame_ramp,
- unsigned int otg_inst,
- uint32_t panel_inst)
-{
- struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
-
- dmcub_set_backlight_level(dce_abm,
- backlight_pwm_u16_16,
- frame_ramp,
- otg_inst,
- panel_inst);
-
- return true;
-}
-
static bool dmub_abm_init_config(struct abm *abm,
const char *src,
unsigned int bytes)
@@ -266,11 +177,8 @@ static bool dmub_abm_init_config(struct abm *abm,
static const struct abm_funcs abm_funcs = {
.abm_init = dmub_abm_init,
.set_abm_level = dmub_abm_set_level,
- .set_pipe = dmub_abm_set_pipe,
- .set_backlight_level_pwm = dmub_abm_set_backlight_level_pwm,
.get_current_backlight = dmub_abm_get_current_backlight,
.get_target_backlight = dmub_abm_get_target_backlight,
- .set_abm_immediate_disable = dmub_abm_immediate_disable,
.init_abm_config = dmub_abm_init_config,
};
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index bed5b023a396..d399270fd17e 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -23,53 +23,35 @@
*
*/
-#ifndef _DMUB_TYPES_H_
-#define _DMUB_TYPES_H_
-
-/* Basic type definitions. */
-#include <asm/byteorder.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <stdarg.h>
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#ifndef dmub_memcpy
-#define dmub_memcpy(dest, source, bytes) memcpy((dest), (source), (bytes))
-#endif
-
-#ifndef dmub_memset
-#define dmub_memset(dest, val, bytes) memset((dest), (val), (bytes))
-#endif
-
-#ifndef dmub_udelay
-#define dmub_udelay(microseconds) udelay(microseconds)
-#endif
-
-/* Maximum number of streams on any ASIC. */
-#define DMUB_MAX_STREAMS 6
-
-/* Maximum number of planes on any ASIC. */
-#define DMUB_MAX_PLANES 6
-
-union dmub_addr {
- struct {
- uint32_t low_part;
- uint32_t high_part;
- } u;
- uint64_t quad_part;
-};
-
-struct dmub_psr_debug_flags {
- uint8_t visual_confirm : 1;
- uint8_t reserved : 7;
-};
-
-#if defined(__cplusplus)
+#include "dmub_hw_lock_mgr.h"
+#include "dc_dmub_srv.h"
+#include "dc_types.h"
+#include "core_types.h"
+
+void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
+ bool lock,
+ union dmub_hw_lock_flags *hw_locks,
+ struct dmub_hw_lock_inst_flags *inst_flags)
+{
+ union dmub_rb_cmd cmd = { 0 };
+
+ cmd.lock_hw.header.type = DMUB_CMD__HW_LOCK;
+ cmd.lock_hw.header.sub_type = 0;
+ cmd.lock_hw.header.payload_bytes = sizeof(struct dmub_cmd_lock_hw_data);
+ cmd.lock_hw.lock_hw_data.client = HW_LOCK_CLIENT_DRIVER;
+ cmd.lock_hw.lock_hw_data.lock = lock;
+ cmd.lock_hw.lock_hw_data.hw_locks.u8All = hw_locks->u8All;
+ memcpy(&cmd.lock_hw.lock_hw_data.inst_flags, inst_flags, sizeof(struct dmub_hw_lock_inst_flags));
+
+ if (!lock)
+ cmd.lock_hw.lock_hw_data.should_release = 1;
+
+ dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dmub_srv);
+ dc_dmub_srv_wait_idle(dmub_srv);
}
-#endif
-#endif /* _DMUB_TYPES_H_ */
+bool should_use_dmub_lock(struct dc_link *link)
+{
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
new file mode 100644
index 000000000000..bc5906347493
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_HW_LOCK_MGR_H_
+#define _DMUB_HW_LOCK_MGR_H_
+
+#include "dc_dmub_srv.h"
+#include "core_types.h"
+
+void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
+ bool lock,
+ union dmub_hw_lock_flags *hw_locks,
+ struct dmub_hw_lock_inst_flags *inst_flags);
+
+bool should_use_dmub_lock(struct dc_link *link);
+
+#endif /*_DMUB_HW_LOCK_MGR_H_ */
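A minimal usage sketch of the new interface (illustrative only, not part of this patch; it mirrors the dcn10 cursor-lock hunk further below and assumes the usual DC driver headers are available):

static void example_cursor_lock_via_dmub(struct dc *dc, struct pipe_ctx *pipe, bool lock)
{
	union dmub_hw_lock_flags hw_locks = { 0 };
	struct dmub_hw_lock_inst_flags inst_flags = { 0 };

	/* should_use_dmub_lock() gates the new path; as added here it still
	 * returns false, so callers fall back to the existing lock programming. */
	if (!pipe->stream || !should_use_dmub_lock(pipe->stream->link))
		return;

	hw_locks.bits.lock_cursor = 1;
	inst_flags.opp_inst = pipe->stream_res.opp->inst;

	/* Queues a DMUB_CMD__HW_LOCK command, kicks the ring buffer and waits
	 * for the firmware to go idle. */
	dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, lock, &hw_locks, &inst_flags);
}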
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 044a0133ebb1..916d305d3022 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -231,8 +231,9 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations;
copy_settings_data->frame_delay = psr_context->frame_delay;
copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq;
- copy_settings_data->debug.visual_confirm = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR ?
+ copy_settings_data->debug.bitfields.visual_confirm = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR ?
true : false;
+ copy_settings_data->init_sdp_deadline = psr_context->sdpTransmitLineNumDeadline;
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index b77e9dc16086..49380ed3aeae 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1069,8 +1069,17 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
}
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
+
+ /*
+ * After the output is set to an idle pattern, some sinks need time to
+ * recognize that the stream has changed, or they enter a protection state and hang.
+ */
+ if (!dc_is_embedded_signal(pipe_ctx->stream->signal))
+ msleep(60);
+ }
+
}
@@ -1148,7 +1157,7 @@ static void build_audio_output(
pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz;
/*for HDMI, audio ACR is with deep color ratio factor*/
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal) &&
+ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) &&
audio_output->crtc_info.requested_pixel_clock_100Hz ==
(stream->timing.pix_clk_100hz)) {
if (pipe_ctx->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
@@ -1443,6 +1452,8 @@ static void power_down_encoders(struct dc *dc)
dc->links[i]->link_enc->funcs->disable_output(
dc->links[i]->link_enc, signal);
+
+ dc->links[i]->link_status.link_active = false;
}
}
@@ -1961,10 +1972,8 @@ static void dce110_setup_audio_dto(
if (pipe_ctx->top_pipe)
continue;
-
if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A)
continue;
-
if (pipe_ctx->stream_res.audio != NULL) {
struct audio_output audio_output;
@@ -2767,6 +2776,16 @@ void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
panel_cntl->funcs->store_backlight_level(panel_cntl);
}
+void dce110_set_pipe(struct pipe_ctx *pipe_ctx)
+{
+ struct abm *abm = pipe_ctx->stream_res.abm;
+ struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
+ uint32_t otg_inst = pipe_ctx->stream_res.tg->inst + 1;
+
+ if (abm && panel_cntl)
+ abm->funcs->set_pipe(abm, otg_inst, panel_cntl->inst);
+}
+
static const struct hw_sequencer_funcs dce110_funcs = {
.program_gamut_remap = program_gamut_remap,
.program_output_csc = program_output_csc,
@@ -2804,6 +2823,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.set_cursor_attribute = dce110_set_cursor_attribute,
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
+ .set_pipe = dce110_set_pipe,
};
static const struct hwseq_private_funcs dce110_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index fe5326df00f7..b6f3843d3d05 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -89,6 +89,7 @@ bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx,
uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp);
void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx);
+void dce110_set_pipe(struct pipe_ctx *pipe_ctx);
#endif /* __DC_HWSS_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 319366ebb44f..cedf359a00f5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -326,6 +326,18 @@ void hubp1_program_pixel_format(
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 119);
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 116,
+ ALPHA_PLANE_EN, 0);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 116,
+ ALPHA_PLANE_EN, 1);
+ break;
+#endif
default:
BREAK_TO_DEBUGGER();
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 77f16921e7f0..cb45f05a0319 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -51,6 +51,7 @@
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dsc.h"
+#include "dce/dmub_hw_lock_mgr.h"
#define DC_LOGGER_INIT(logger)
@@ -1287,7 +1288,9 @@ void dcn10_init_hw(struct dc *dc)
if (!dcb->funcs->is_accelerated_mode(dcb))
hws->funcs.disable_vga(dc->hwseq);
- hws->funcs.bios_golden_init(dc);
+ if (!dc_dmub_srv_optimized_init_done(dc->ctx->dmub_srv))
+ hws->funcs.bios_golden_init(dc);
+
if (dc->ctx->dc_bios->fw_info_valid) {
res_pool->ref_clocks.xtalin_clock_inKhz =
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
@@ -1393,10 +1396,10 @@ void dcn10_init_hw(struct dc *dc)
if (edp_link &&
edp_link->link_enc->funcs->is_dig_enabled &&
edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
- dc->hwss.edp_backlight_control &&
+ dc->hwseq->funcs.edp_backlight_control &&
dc->hwss.power_down &&
dc->hwss.edp_power_control) {
- dc->hwss.edp_backlight_control(edp_link, false);
+ dc->hwseq->funcs.edp_backlight_control(edp_link, false);
dc->hwss.power_down(dc);
dc->hwss.edp_power_control(edp_link, false);
} else {
@@ -1453,6 +1456,11 @@ void dcn10_init_hw(struct dc *dc)
if (dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ if (dc->clk_mgr->funcs->set_hard_max_memclk)
+ dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
+#endif
+
}
void dcn10_reset_hw_ctx_wrap(
@@ -1758,8 +1766,20 @@ void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
if (lock)
delay_cursor_until_vupdate(dc, pipe);
- dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
- pipe->stream_res.opp->inst, lock);
+ if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
+ union dmub_hw_lock_flags hw_locks = { 0 };
+ struct dmub_hw_lock_inst_flags inst_flags = { 0 };
+
+ hw_locks.bits.lock_cursor = 1;
+ inst_flags.opp_inst = pipe->stream_res.opp->inst;
+
+ dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
+ lock,
+ &hw_locks,
+ &inst_flags);
+ } else
+ dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
+ pipe->stream_res.opp->inst, lock);
}
static bool wait_for_reset_trigger_to_occur(
@@ -2576,14 +2596,15 @@ void dcn10_blank_pixel_data(
if (stream_res->tg->funcs->set_blank)
stream_res->tg->funcs->set_blank(stream_res->tg, blank);
if (stream_res->abm) {
- stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1,
- stream->link->panel_cntl->inst);
+ dc->hwss.set_pipe(pipe_ctx);
stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
}
} else if (blank) {
dc->hwss.set_abm_immediate_disable(pipe_ctx);
- if (stream_res->tg->funcs->set_blank)
+ if (stream_res->tg->funcs->set_blank) {
+ stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
stream_res->tg->funcs->set_blank(stream_res->tg, blank);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
index 7cb8c3fb2665..f6a790c49321 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -75,6 +75,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.calc_vupdate_position = dcn10_calc_vupdate_position,
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
+ .set_pipe = dce110_set_pipe,
};
static const struct hwseq_private_funcs dcn10_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 7fd385be3f3d..81db0179f7ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -619,11 +619,17 @@ bool dcn10_link_encoder_validate_dvi_output(
static bool dcn10_link_encoder_validate_hdmi_output(
const struct dcn10_link_encoder *enc10,
const struct dc_crtc_timing *crtc_timing,
+ const struct dc_edid_caps *edid_caps,
int adjusted_pix_clk_100hz)
{
enum dc_color_depth max_deep_color =
enc10->base.features.max_hdmi_deep_color;
+ // check the pixel clock against the EDID-specified max TMDS clock (MHz, compared in 100 Hz units)
+ if (edid_caps->max_tmds_clk_mhz != 0 &&
+ adjusted_pix_clk_100hz > edid_caps->max_tmds_clk_mhz * 10000)
+ return false;
+
if (max_deep_color < crtc_timing->display_color_depth)
return false;
@@ -801,6 +807,7 @@ bool dcn10_link_encoder_validate_output_with_stream(
is_valid = dcn10_link_encoder_validate_hdmi_output(
enc10,
&stream->timing,
+ &stream->sink->edid_caps,
stream->phy_pix_clk * 10);
break;
case SIGNAL_TYPE_DISPLAY_PORT:
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index 68395bcc24fd..cf59ab0034dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -153,6 +153,12 @@ struct dcn10_link_enc_registers {
uint32_t RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_3;
uint32_t RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_2;
uint32_t RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_3;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ uint32_t TMDS_DCBALANCER_CONTROL;
+ uint32_t PHYA_LINK_CNTL2;
+ uint32_t PHYB_LINK_CNTL2;
+ uint32_t PHYC_LINK_CNTL2;
+#endif
};
#define LE_SF(reg_name, field_name, post_fix)\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index ec0ab42becba..2972392f9788 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -288,8 +288,19 @@ void optc1_program_timing(
if (optc1_is_two_pixels_per_containter(&patched_crtc_timing) || optc1->opp_count == 2)
h_div = H_TIMING_DIV_BY2;
- REG_UPDATE(OTG_H_TIMING_CNTL,
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ if (optc1->tg_mask->OTG_H_TIMING_DIV_MODE != 0) {
+ if (optc1->opp_count == 4)
+ h_div = H_TIMING_DIV_BY4;
+
+ REG_UPDATE(OTG_H_TIMING_CNTL,
+ OTG_H_TIMING_DIV_MODE, h_div);
+ } else
+#endif
+ {
+ REG_UPDATE(OTG_H_TIMING_CNTL,
OTG_H_TIMING_DIV_BY2, h_div);
+ }
}
void optc1_set_vtg_params(struct timing_generator *optc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 8d1e52fb0393..b38475285835 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -171,6 +171,15 @@ struct dcn_optc_registers {
uint32_t OPTC_DATA_FORMAT_CONTROL;
uint32_t OPTC_BYTES_PER_PIXEL;
uint32_t OPTC_WIDTH_CONTROL;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ uint32_t OTG_BLANK_DATA_COLOR;
+ uint32_t OTG_BLANK_DATA_COLOR_EXT;
+ uint32_t OTG_DRR_TRIGGER_WINDOW;
+ uint32_t OTG_M_CONST_DTO0;
+ uint32_t OTG_M_CONST_DTO1;
+ uint32_t OTG_DRR_V_TOTAL_CHANGE;
+ uint32_t OTG_GLOBAL_CONTROL4;
+#endif
};
#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
@@ -296,6 +305,8 @@ struct dcn_optc_registers {
SF(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, mask_sh),\
SF(OTG0_OTG_GLOBAL_CONTROL2, MANUAL_FLOW_CONTROL_SEL, mask_sh)
+
+
#define TG_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\
SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_INC0, mask_sh),\
@@ -385,6 +396,13 @@ struct dcn_optc_registers {
type OTG_BLACK_COLOR_B_CB;\
type OTG_BLACK_COLOR_G_Y;\
type OTG_BLACK_COLOR_R_CR;\
+ type OTG_BLANK_DATA_COLOR_BLUE_CB;\
+ type OTG_BLANK_DATA_COLOR_GREEN_Y;\
+ type OTG_BLANK_DATA_COLOR_RED_CR;\
+ type OTG_BLANK_DATA_COLOR_BLUE_CB_EXT;\
+ type OTG_BLANK_DATA_COLOR_GREEN_Y_EXT;\
+ type OTG_BLANK_DATA_COLOR_RED_CR_EXT;\
+ type OTG_VTOTAL_MID_REPLACING_MIN_EN;\
type OTG_TEST_PATTERN_INC0;\
type OTG_TEST_PATTERN_INC1;\
type OTG_TEST_PATTERN_VRES;\
@@ -456,9 +474,17 @@ struct dcn_optc_registers {
type MANUAL_FLOW_CONTROL;\
type MANUAL_FLOW_CONTROL_SEL;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define TG_REG_FIELD_LIST(type) \
TG_REG_FIELD_LIST_DCN1_0(type)\
+ type OTG_V_SYNC_MODE;\
+ type OTG_DRR_TRIGGER_WINDOW_START_X;\
+ type OTG_DRR_TRIGGER_WINDOW_END_X;\
+ type OTG_DRR_V_TOTAL_CHANGE_LIMIT;\
+ type OTG_OUT_MUX;\
+ type OTG_M_CONST_DTO_PHASE;\
+ type OTG_M_CONST_DTO_MODULO;\
type MASTER_UPDATE_LOCK_DB_X;\
type MASTER_UPDATE_LOCK_DB_Y;\
type MASTER_UPDATE_LOCK_DB_EN;\
@@ -469,6 +495,8 @@ struct dcn_optc_registers {
type OPTC_NUM_OF_INPUT_SEGMENT;\
type OPTC_SEG0_SRC_SEL;\
type OPTC_SEG1_SRC_SEL;\
+ type OPTC_SEG2_SRC_SEL;\
+ type OPTC_SEG3_SRC_SEL;\
type OPTC_MEM_SEL;\
type OPTC_DATA_FORMAT;\
type OPTC_DSC_MODE;\
@@ -477,11 +505,45 @@ struct dcn_optc_registers {
type OPTC_SEGMENT_WIDTH;\
type OPTC_DWB0_SOURCE_SELECT;\
type OPTC_DWB1_SOURCE_SELECT;\
+ type MASTER_UPDATE_LOCK_DB_START_X;\
+ type MASTER_UPDATE_LOCK_DB_END_X;\
+ type MASTER_UPDATE_LOCK_DB_START_Y;\
+ type MASTER_UPDATE_LOCK_DB_END_Y;\
+ type DIG_UPDATE_POSITION_X;\
+ type DIG_UPDATE_POSITION_Y;\
+ type OTG_H_TIMING_DIV_MODE;\
+ type OTG_DRR_TIMING_DBUF_UPDATE_MODE;\
type OTG_CRC_DSC_MODE;\
type OTG_CRC_DATA_STREAM_COMBINE_MODE;\
type OTG_CRC_DATA_STREAM_SPLIT_MODE;\
type OTG_CRC_DATA_FORMAT;
+#else
+#define TG_REG_FIELD_LIST(type) \
+ TG_REG_FIELD_LIST_DCN1_0(type)\
+ type MASTER_UPDATE_LOCK_DB_X;\
+ type MASTER_UPDATE_LOCK_DB_Y;\
+ type MASTER_UPDATE_LOCK_DB_EN;\
+ type GLOBAL_UPDATE_LOCK_EN;\
+ type DIG_UPDATE_LOCATION;\
+ type OTG_DSC_START_POSITION_X;\
+ type OTG_DSC_START_POSITION_LINE_NUM;\
+ type OPTC_NUM_OF_INPUT_SEGMENT;\
+ type OPTC_SEG0_SRC_SEL;\
+ type OPTC_SEG1_SRC_SEL;\
+ type OPTC_MEM_SEL;\
+ type OPTC_DATA_FORMAT;\
+ type OPTC_DSC_MODE;\
+ type OPTC_DSC_BYTES_PER_PIXEL;\
+ type OPTC_DSC_SLICE_WIDTH;\
+ type OPTC_SEGMENT_WIDTH;\
+ type OPTC_DWB0_SOURCE_SELECT;\
+ type OPTC_DWB1_SOURCE_SELECT;\
+ type OTG_CRC_DSC_MODE;\
+ type OTG_CRC_DATA_STREAM_COMBINE_MODE;\
+ type OTG_CRC_DATA_STREAM_SPLIT_MODE;\
+ type OTG_CRC_DATA_FORMAT;
+#endif
struct dcn_optc_shift {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index f9b9e221c698..ed385b1477be 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -169,6 +169,14 @@ struct dcn10_stream_enc_registers {
uint32_t DP_SEC_METADATA_TRANSMISSION;
uint32_t HDMI_METADATA_PACKET_CONTROL;
uint32_t DP_SEC_FRAMING4;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ uint32_t DP_GSP11_CNTL;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL6;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL7;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL8;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL9;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL10;
+#endif
uint32_t DIG_CLOCK_PATTERN;
};
@@ -483,14 +491,48 @@ struct dcn10_stream_enc_registers {
type DP_PIXEL_COMBINE;\
type DP_SST_SDP_SPLITTING
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define SE_REG_FIELD_LIST_DCN3_0(type) \
+ type HDMI_GENERIC8_CONT;\
+ type HDMI_GENERIC8_SEND;\
+ type HDMI_GENERIC8_LINE;\
+ type HDMI_GENERIC9_CONT;\
+ type HDMI_GENERIC9_SEND;\
+ type HDMI_GENERIC9_LINE;\
+ type HDMI_GENERIC10_CONT;\
+ type HDMI_GENERIC10_SEND;\
+ type HDMI_GENERIC10_LINE;\
+ type HDMI_GENERIC11_CONT;\
+ type HDMI_GENERIC11_SEND;\
+ type HDMI_GENERIC11_LINE;\
+ type HDMI_GENERIC12_CONT;\
+ type HDMI_GENERIC12_SEND;\
+ type HDMI_GENERIC12_LINE;\
+ type HDMI_GENERIC13_CONT;\
+ type HDMI_GENERIC13_SEND;\
+ type HDMI_GENERIC13_LINE;\
+ type HDMI_GENERIC14_CONT;\
+ type HDMI_GENERIC14_SEND;\
+ type HDMI_GENERIC14_LINE;\
+ type DP_SEC_GSP11_PPS;\
+ type DP_SEC_GSP11_ENABLE;\
+ type DP_SEC_GSP11_LINE_NUM
+#endif
+
struct dcn10_stream_encoder_shift {
SE_REG_FIELD_LIST_DCN1_0(uint8_t);
SE_REG_FIELD_LIST_DCN2_0(uint8_t);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ SE_REG_FIELD_LIST_DCN3_0(uint8_t);
+#endif
};
struct dcn10_stream_encoder_mask {
SE_REG_FIELD_LIST_DCN1_0(uint32_t);
SE_REG_FIELD_LIST_DCN2_0(uint32_t);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ SE_REG_FIELD_LIST_DCN3_0(uint32_t);
+#endif
};
struct dcn10_stream_encoder {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
index 2205cb0204e7..06daf35bb587 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
@@ -76,18 +76,40 @@
type REFCLK_CLOCK_EN;\
type REFCLK_SRC_SEL;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define DCCG3_REG_FIELD_LIST(type) \
+ type PHYASYMCLK_FORCE_EN;\
+ type PHYASYMCLK_FORCE_SRC_SEL;\
+ type PHYBSYMCLK_FORCE_EN;\
+ type PHYBSYMCLK_FORCE_SRC_SEL;\
+ type PHYCSYMCLK_FORCE_EN;\
+ type PHYCSYMCLK_FORCE_SRC_SEL;
+#endif
+
struct dccg_shift {
DCCG_REG_FIELD_LIST(uint8_t)
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ DCCG3_REG_FIELD_LIST(uint8_t)
+#endif
};
struct dccg_mask {
DCCG_REG_FIELD_LIST(uint32_t)
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ DCCG3_REG_FIELD_LIST(uint32_t)
+#endif
};
struct dccg_registers {
uint32_t DPPCLK_DTO_CTRL;
uint32_t DPPCLK_DTO_PARAM[6];
uint32_t REFCLK_CNTL;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ uint32_t HDMICHARCLK_CLOCK_CNTL[6];
+ uint32_t PHYASYMCLK_CLOCK_CNTL;
+ uint32_t PHYBSYMCLK_CLOCK_CNTL;
+ uint32_t PHYCSYMCLK_CLOCK_CNTL;
+#endif
};
struct dcn_dccg {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index 42bba7c9548b..4af96cc5d9d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -181,9 +181,11 @@ static void dpp2_cnv_setup (
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
pixel_format = 112;
+ alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
pixel_format = 113;
+ alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
pixel_format = 114;
@@ -199,9 +201,11 @@ static void dpp2_cnv_setup (
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
pixel_format = 118;
+ alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
pixel_format = 119;
+ alpha_en = 0;
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 1b1ae9ce2799..3c6ecfe141bb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -717,22 +717,5 @@ static void dsc_write_to_registers(struct display_stream_compressor *dsc, const
RANGE_MAX_QP14, reg_vals->pps.rc_range_params[14].range_max_qp,
RANGE_BPG_OFFSET14, reg_vals->pps.rc_range_params[14].range_bpg_offset);
- if (IS_FPGA_MAXIMUS_DC(dsc20->base.ctx->dce_environment)) {
- /* It's safe to do this as long as debug bus is not being used in DAL Diag environment.
- *
- * This is because DSCC_PPS_CONFIG4.INITIAL_DEC_DELAY is a read-only register field (because it's a decoder
- * value not required by DSC encoder). However, since decoding fails when this value is missing from PPS, it's
- * required to communicate this value to the PPS header. When testing on FPGA, the values for PPS header are
- * being read from Diag register dump. The register below is used in place of a scratch register to make
- * 'initial_dec_delay' available.
- */
-
- temp_int = reg_vals->pps.initial_dec_delay;
- REG_SET_4(DSCC_TEST_DEBUG_BUS_ROTATE, 0,
- DSCC_TEST_DEBUG_BUS0_ROTATE, temp_int & 0x1f,
- DSCC_TEST_DEBUG_BUS1_ROTATE, temp_int >> 5 & 0x1f,
- DSCC_TEST_DEBUG_BUS2_ROTATE, temp_int >> 10 & 0x1f,
- DSCC_TEST_DEBUG_BUS3_ROTATE, temp_int >> 15 & 0x1);
- }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
index 9855a7ed0387..667640c4b288 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
@@ -78,7 +78,6 @@
SRI(DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\
SRI(DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\
SRI(DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_TEST_DEBUG_BUS_ROTATE, DSCC, id),\
SRI(DSCCIF_CONFIG0, DSCCIF, id),\
SRI(DSCCIF_CONFIG1, DSCCIF, id),\
SRI(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id)
@@ -95,8 +94,6 @@
DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_CLOCK_EN, mask_sh), \
DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DISPCLK_R_GATE_DIS, mask_sh), \
DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DSCCLK_R_GATE_DIS, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_DBG_EN, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_TEST_CLOCK_MUX_SEL, mask_sh), \
DSC_SF(DSCC0_DSCC_CONFIG0, ICH_RESET_AT_END_OF_LINE, mask_sh), \
DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_PER_LINE, mask_sh), \
DSC_SF(DSCC0_DSCC_CONFIG0, ALTERNATE_ICH_ENCODING_EN, mask_sh), \
@@ -249,10 +246,6 @@
DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \
DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \
DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE, DSCC_TEST_DEBUG_BUS0_ROTATE, mask_sh), \
- DSC_SF(DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE, DSCC_TEST_DEBUG_BUS1_ROTATE, mask_sh), \
- DSC_SF(DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE, DSCC_TEST_DEBUG_BUS2_ROTATE, mask_sh), \
- DSC_SF(DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE, DSCC_TEST_DEBUG_BUS3_ROTATE, mask_sh), \
DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN, mask_sh), \
DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS, mask_sh), \
@@ -427,10 +420,6 @@
type DSCC_UPDATE_PENDING_STATUS; \
type DSCC_UPDATE_TAKEN_STATUS; \
type DSCC_UPDATE_TAKEN_ACK; \
- type DSCC_TEST_DEBUG_BUS0_ROTATE; \
- type DSCC_TEST_DEBUG_BUS1_ROTATE; \
- type DSCC_TEST_DEBUG_BUS2_ROTATE; \
- type DSCC_TEST_DEBUG_BUS3_ROTATE; \
type DSCC_RATE_BUFFER0_FULLNESS_LEVEL; \
type DSCC_RATE_BUFFER1_FULLNESS_LEVEL; \
type DSCC_RATE_BUFFER2_FULLNESS_LEVEL; \
@@ -503,7 +492,6 @@ struct dcn20_dsc_registers {
uint32_t DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL;
uint32_t DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL;
uint32_t DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL;
- uint32_t DSCC_TEST_DEBUG_BUS_ROTATE;
uint32_t DSCCIF_CONFIG0;
uint32_t DSCCIF_CONFIG1;
uint32_t DSCRM_DSC_FORWARD_CONFIG;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index c0b21d7450d4..69d49551ab5d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -153,6 +153,10 @@ bool hubbub2_dcc_support_pixel_format(
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
+#endif
*bytes_per_element = 4;
return true;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
@@ -339,6 +343,11 @@ static enum dcn_hubbub_page_table_block_size page_table_block_size_to_hw(unsigne
case 65536:
block_size = DCN_PAGE_TABLE_BLOCK_SIZE_64KB;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case 32768:
+ block_size = DCN_PAGE_TABLE_BLOCK_SIZE_32KB;
+ break;
+#endif
default:
ASSERT(false);
block_size = page_table_block_size;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 84d7ac5dd206..bb920d0e0b89 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -336,6 +336,10 @@ void hubp2_program_size(
*/
use_pitch_c = format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
&& format < SURFACE_PIXEL_FORMAT_SUBSAMPLE_END;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ use_pitch_c = use_pitch_c
+ || (format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA);
+#endif
if (use_pitch_c) {
ASSERT(plane_size->chroma_pitch != 0);
/* Chroma pitch zero can cause system hang! */
@@ -360,6 +364,10 @@ void hubp2_program_size(
PITCH, pitch, META_PITCH, meta_pitch);
use_pitch_c = format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ use_pitch_c = use_pitch_c
+ || (format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA);
+#endif
if (use_pitch_c)
REG_UPDATE_2(DCSURF_SURFACE_PITCH_C,
PITCH_C, pitch_c, META_PITCH_C, meta_pitch_c);
@@ -505,6 +513,18 @@ void hubp2_program_pixel_format(
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 119);
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 116,
+ ALPHA_PLANE_EN, 0);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 116,
+ ALPHA_PLANE_EN, 1);
+ break;
+#endif
default:
BREAK_TO_DEBUGGER();
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
index 8c04a3606a54..4a2c93087459 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
@@ -157,6 +157,12 @@
uint32_t VBLANK_PARAMETERS_5;\
uint32_t VBLANK_PARAMETERS_6
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define DCN30_HUBP_REG_COMMON_VARIABLE_LIST \
+ DCN21_HUBP_REG_COMMON_VARIABLE_LIST;\
+ uint32_t DCN_DMDATA_VM_CNTL
+#endif
+
#define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \
DCN_HUBP_REG_FIELD_BASE_LIST(type); \
type DMDATA_ADDRESS_HIGH;\
@@ -192,17 +198,52 @@
type REFCYC_PER_META_CHUNK_FLIP_C; \
type VM_GROUP_SIZE
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define DCN30_HUBP_REG_FIELD_VARIABLE_LIST(type) \
+ DCN21_HUBP_REG_FIELD_VARIABLE_LIST(type);\
+ type PRIMARY_SURFACE_DCC_IND_BLK;\
+ type SECONDARY_SURFACE_DCC_IND_BLK;\
+ type PRIMARY_SURFACE_DCC_IND_BLK_C;\
+ type SECONDARY_SURFACE_DCC_IND_BLK_C;\
+ type ALPHA_PLANE_EN;\
+ type REFCYC_PER_VM_DMDATA;\
+ type DMDATA_VM_FAULT_STATUS;\
+ type DMDATA_VM_FAULT_STATUS_CLEAR; \
+ type DMDATA_VM_UNDERFLOW_STATUS;\
+ type DMDATA_VM_LATE_STATUS;\
+ type DMDATA_VM_UNDERFLOW_STATUS_CLEAR; \
+ type DMDATA_VM_DONE; \
+ type CROSSBAR_SRC_Y_G; \
+ type CROSSBAR_SRC_ALPHA; \
+ type PACK_3TO2_ELEMENT_DISABLE; \
+ type ROW_TTU_MODE; \
+ type NUM_PKRS
+#endif
struct dcn_hubp2_registers {
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ DCN30_HUBP_REG_COMMON_VARIABLE_LIST;
+#else
DCN21_HUBP_REG_COMMON_VARIABLE_LIST;
+#endif
};
struct dcn_hubp2_shift {
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ DCN30_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t);
+#else
DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t);
+#endif
+
};
struct dcn_hubp2_mask {
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ DCN30_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t);
+#else
DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t);
+#endif
+
};
struct dcn20_hubp {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index da5333d165ac..5621c95177d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -49,6 +49,8 @@
#include "dc_link_dp.h"
#include "vm_helper.h"
#include "dccg.h"
+#include "dc_dmub_srv.h"
+#include "dce/dmub_hw_lock_mgr.h"
#define DC_LOGGER_INIT(logger)
@@ -291,12 +293,20 @@ void dcn20_init_blank(
/* get the OPTC source */
tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
- ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
+
+ if (opp_id_src0 >= dc->res_pool->res_cap->num_opp) {
+ ASSERT(false);
+ return;
+ }
opp = dc->res_pool->opps[opp_id_src0];
if (num_opps == 2) {
otg_active_width = otg_active_width / 2;
- ASSERT(opp_id_src1 < dc->res_pool->res_cap->num_opp);
+
+ if (opp_id_src1 >= dc->res_pool->res_cap->num_opp) {
+ ASSERT(false);
+ return;
+ }
bottom_opp = dc->res_pool->opps[opp_id_src1];
}
@@ -601,6 +611,31 @@ void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
pipe_ctx->pipe_idx);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
+ int opp_cnt)
+{
+ bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
+ int flow_ctrl_cnt;
+
+ if (opp_cnt >= 2)
+ hblank_halved = true;
+
+ flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
+ stream->timing.h_border_left -
+ stream->timing.h_border_right;
+
+ if (hblank_halved)
+ flow_ctrl_cnt /= 2;
+
+ /* ODM combine 4:1 case */
+ if (opp_cnt == 4)
+ flow_ctrl_cnt /= 2;
+
+ return flow_ctrl_cnt;
+}
+#endif
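As a worked example with illustrative numbers (not taken from the patch): for a timing with h_total = 4400, h_addressable = 3840 and no horizontal borders, calc_mpc_flow_ctrl_cnt() starts from 4400 - 3840 = 560; with two OPPs the horizontal blank is treated as halved, giving 280, and the ODM combine 4:1 case halves it once more to 140.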
+
enum dc_status dcn20_enable_stream_timing(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
@@ -614,6 +649,15 @@ enum dc_status dcn20_enable_stream_timing(
int opp_cnt = 1;
int opp_inst[MAX_PIPES] = { pipe_ctx->stream_res.opp->inst };
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ bool interlace = stream->timing.flags.INTERLACE;
+ int i;
+
+ struct mpc_dwb_flow_control flow_control;
+ struct mpc *mpc = dc->res_pool->mpc;
+ bool rate_control_2x_pclk = (interlace || optc2_is_two_pixels_per_containter(&stream->timing));
+
+#endif
/* by upper caller loop, pipe0 is parent pipe and be called first.
* back end is set up by for pipe0. Other children pipe share back end
* with pipe 0. No program is needed.
@@ -660,6 +704,21 @@ enum dc_status dcn20_enable_stream_timing(
pipe_ctx->stream->signal,
true);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
+ flow_control.flow_ctrl_mode = 0;
+ flow_control.flow_ctrl_cnt0 = 0x80;
+ flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(stream, opp_cnt);
+ if (mpc->funcs->set_out_rate_control) {
+ for (i = 0; i < opp_cnt; ++i) {
+ mpc->funcs->set_out_rate_control(
+ mpc, opp_inst[i],
+ true,
+ rate_control_2x_pclk,
+ &flow_control);
+ }
+ }
+#endif
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
@@ -996,8 +1055,7 @@ void dcn20_blank_pixel_data(
if (!blank)
if (stream_res->abm) {
- stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1,
- stream->link->panel_cntl->inst);
+ dc->hwss.set_pipe(pipe_ctx);
stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
}
}
@@ -1138,7 +1196,21 @@ void dcn20_pipe_control_lock(
(!flip_immediate && pipe->stream_res.gsl_group > 0))
dcn20_setup_gsl_group_as_lock(dc, pipe, flip_immediate);
- if (pipe->plane_state != NULL && pipe->plane_state->triplebuffer_flips) {
+ if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
+ union dmub_hw_lock_flags hw_locks = { 0 };
+ struct dmub_hw_lock_inst_flags inst_flags = { 0 };
+
+ hw_locks.bits.lock_pipe = 1;
+ inst_flags.otg_inst = pipe->stream_res.tg->inst;
+
+ if (pipe->plane_state != NULL)
+ hw_locks.bits.triple_buffer_lock = pipe->plane_state->triplebuffer_flips;
+
+ dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
+ lock,
+ &hw_locks,
+ &inst_flags);
+ } else if (pipe->plane_state != NULL && pipe->plane_state->triplebuffer_flips) {
if (lock)
pipe->stream_res.tg->funcs->triplebuffer_lock(pipe->stream_res.tg);
else
@@ -1209,14 +1281,13 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
new_pipe->update_flags.bits.tg_changed = 1;
- /* Detect mpcc blending changes, only dpp inst and bot matter here */
+ /*
+ * Detect MPCC blending changes; only the DPP instance and OPP matter
+ * here. MPCCs that are removed or inserted update the connected ones
+ * during their own programming.
+ */
if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
- || old_pipe->stream_res.opp != new_pipe->stream_res.opp
- || (!old_pipe->bottom_pipe && new_pipe->bottom_pipe)
- || (old_pipe->bottom_pipe && !new_pipe->bottom_pipe)
- || (old_pipe->bottom_pipe && new_pipe->bottom_pipe
- && old_pipe->bottom_pipe->plane_res.mpcc_inst
- != new_pipe->bottom_pipe->plane_res.mpcc_inst))
+ || old_pipe->stream_res.opp != new_pipe->stream_res.opp)
new_pipe->update_flags.bits.mpcc = 1;
/* Detect dppclk change */
@@ -1400,6 +1471,37 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
|| pipe_ctx->stream->update_flags.bits.gamut_remap
|| pipe_ctx->stream->update_flags.bits.out_csc) {
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
+
+ if (mpc->funcs->set_gamut_remap) {
+ int i;
+ int mpcc_id = hubp->inst;
+ struct mpc_grph_gamut_adjustment adjust;
+ bool enable_remap_dpp = false;
+
+ memset(&adjust, 0, sizeof(adjust));
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+ /* save whether gamut remap is enabled for the DPP */
+ enable_remap_dpp = pipe_ctx->stream->gamut_remap_matrix.enable_remap;
+ /* force the DPP/CM gamut remap into bypass */
+ pipe_ctx->stream->gamut_remap_matrix.enable_remap = false;
+ dc->hwss.program_gamut_remap(pipe_ctx);
+ /* restore the gamut remap flag for the top plane and apply the remap in the MPC */
+ if (pipe_ctx->top_pipe == NULL)
+ pipe_ctx->stream->gamut_remap_matrix.enable_remap = enable_remap_dpp;
+ else
+ pipe_ctx->stream->gamut_remap_matrix.enable_remap = false;
+
+ if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
+ adjust.temperature_matrix[i] =
+ pipe_ctx->stream->gamut_remap_matrix.matrix[i];
+ }
+ mpc->funcs->set_gamut_remap(mpc, mpcc_id, &adjust);
+ } else
+#endif
/* dpp/cm gamut remap*/
dc->hwss.program_gamut_remap(pipe_ctx);
@@ -1436,9 +1538,6 @@ static void dcn20_update_dchubp_dpp(
hubp->power_gated = false;
}
- if (hubp->funcs->apply_PLAT_54186_wa && viewport_changed)
- hubp->funcs->apply_PLAT_54186_wa(hubp, &plane_state->address);
-
if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update)
hws->funcs.update_plane_addr(dc, pipe_ctx);
@@ -2162,6 +2261,11 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
blnd_cfg.bottom_inside_gain = 0x1f000;
blnd_cfg.bottom_outside_gain = 0x1f000;
blnd_cfg.pre_multiplied_alpha = per_pixel_alpha;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ if (pipe_ctx->plane_state->format
+ == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA)
+ blnd_cfg.pre_multiplied_alpha = false;
+#endif
/*
* TODO: remove hack
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index 2fbde4241559..bb9e9bec2f28 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -86,6 +86,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.calc_vupdate_position = dcn10_calc_vupdate_position,
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
+ .set_pipe = dce110_set_pipe,
};
static const struct hwseq_private_funcs dcn20_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
index 284a1ee4d249..db09f40075c2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
@@ -147,8 +147,7 @@
LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_SWAP, mask_sh),\
LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT, mask_sh),\
LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_EN, mask_sh),\
- LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_RD_START_DELAY, mask_sh),\
- LE_SF(DPCSTX0_DPCSTX_DEBUG_CONFIG, DPCS_DBG_CBUS_DIS, mask_sh)
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_RD_START_DELAY, mask_sh)
#define DPCS_DCN2_MASK_SH_LIST(mask_sh)\
DPCS_MASK_SH_LIST(mask_sh),\
@@ -274,6 +273,10 @@ struct mpll_cfg {
bool dp_tx1_vergdrv_byp;
bool dp_tx2_vergdrv_byp;
bool dp_tx3_vergdrv_byp;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ uint32_t tx_peaking_lvl;
+ uint32_t ctr_reqs_pll;
+#endif
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index cef1aa938ab5..0983bcc25117 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -157,7 +157,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = {
.number_of_cursors = 1,
};
-struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
+static struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
.odm_capable = 1,
.gpuvm_enable = 0,
.hostvm_enable = 0,
@@ -226,7 +226,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
.number_of_cursors = 1,
};
-struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
/* Defaults that get patched on driver load from firmware. */
.clock_limits = {
{
@@ -338,7 +338,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
.use_urgent_burst_bw = 0
};
-struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
.clock_limits = {
{
.state = 0,
@@ -449,7 +449,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
.use_urgent_burst_bw = 0
};
-struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
+static struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
@@ -1323,7 +1323,7 @@ static struct panel_cntl *dcn20_panel_cntl_create(const struct panel_cntl_init_d
return &panel_cntl->base;
}
-struct clock_source *dcn20_clock_source_create(
+static struct clock_source *dcn20_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
@@ -2015,6 +2015,10 @@ int dcn20_populate_dml_pipes_from_context(
pipe_cnt = i;
continue;
}
+
+ if (res_ctx->pipe_ctx[pipe_cnt].stream == res_ctx->pipe_ctx[i].stream)
+ continue;
+
if (dc->debug.disable_timing_sync || !resource_are_streams_timing_synchronizable(
res_ctx->pipe_ctx[pipe_cnt].stream,
res_ctx->pipe_ctx[i].stream)) {
@@ -2029,6 +2033,9 @@ int dcn20_populate_dml_pipes_from_context(
unsigned int front_porch;
int output_bpc;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ struct audio_check aud_check = {0};
+#endif
if (!res_ctx->pipe_ctx[i].stream)
continue;
@@ -2083,6 +2090,11 @@ int dcn20_populate_dml_pipes_from_context(
case 1:
pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_2to1;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case 3:
+ pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_4to1;
+ break;
+#endif
default:
pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_disabled;
}
@@ -2179,6 +2191,11 @@ int dcn20_populate_dml_pipes_from_context(
/* todo: default max for now, until there is logic reflecting this in dc*/
pipes[pipe_cnt].dout.output_bpc = 12;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ /* fill in the max audio sample rate */
+ get_audio_check(&res_ctx->pipe_ctx[i].stream->audio_info, &aud_check);
+ pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate;
+#endif
/*
* For graphic plane, cursor number is 1, nv12 is 0
* bw calculations due to cursor on/off
@@ -2226,6 +2243,12 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.viewport_width /= 2;
pipes[pipe_cnt].pipe.dest.recout_width /= 2;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ else if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_4to1) {
+ pipes[pipe_cnt].pipe.src.viewport_width /= 4;
+ pipes[pipe_cnt].pipe.dest.recout_width /= 4;
+ }
+#endif
} else {
struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state;
struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data;
@@ -2234,6 +2257,14 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln)
|| (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln)
|| pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
+
+ /* stereo is never pipe split, nor ODM combined */
+ if (pln->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE ||
+ pln->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM) {
+ pipes[pipe_cnt].pipe.src.is_hsplit = false;
+ pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx;
+ }
+
pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
|| pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport_unadjusted.y;
@@ -2246,7 +2277,12 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.surface_height_y = pln->plane_size.surface_size.height;
pipes[pipe_cnt].pipe.src.surface_width_c = pln->plane_size.chroma_size.width;
pipes[pipe_cnt].pipe.src.surface_height_c = pln->plane_size.chroma_size.height;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ if (pln->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA
+ || pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+#else
if (pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+#endif
pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch;
pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.chroma_pitch;
pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.meta_pitch;
@@ -2262,6 +2298,10 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width;
if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_2to1)
pipes[pipe_cnt].pipe.dest.full_recout_width *= 2;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ else if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_4to1)
+ pipes[pipe_cnt].pipe.dest.full_recout_width *= 4;
+#endif
else {
struct pipe_ctx *split_pipe = res_ctx->pipe_ctx[i].bottom_pipe;
@@ -2318,6 +2358,11 @@ int dcn20_populate_dml_pipes_from_context(
case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
pipes[pipe_cnt].pipe.src.source_format = dm_444_8;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
+ pipes[pipe_cnt].pipe.src.source_format = dm_rgbe_alpha;
+ break;
+#endif
default:
pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
break;
@@ -2629,6 +2674,23 @@ int dcn20_validate_apply_pipe_split_flags(
if (plane_count > dc->res_pool->pipe_count / 2)
avoid_split = true;
+ /* W/A: mode timings with borders may not work well with pipe split; avoid splitting for this corner case */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct dc_crtc_timing timing;
+
+ if (!pipe->stream)
+ continue;
+
+ timing = pipe->stream->timing;
+ if (timing.h_border_left + timing.h_border_right
+ + timing.v_border_top + timing.v_border_bottom > 0) {
+ avoid_split = true;
+ break;
+ }
+ }
+
/* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */
if (avoid_split) {
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
@@ -2678,6 +2740,17 @@ int dcn20_validate_apply_pipe_split_flags(
split[i] = 2;
v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ if (dc->debug.force_odm_combine_4to1 & (1 << pipe->stream_res.tg->inst)) {
+ split[i] = 4;
+ v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_4to1;
+ }
+ /* YCbCr 4:2:0 format workaround */
+ if (pipe->stream->timing.h_addressable > 7680 &&
+ pipe->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ split[i] = 4;
+ }
+#endif
v->ODMCombineEnabled[pipe_plane] =
v->ODMCombineEnablePerState[vlevel][pipe_plane];
@@ -2836,8 +2909,8 @@ bool dcn20_fast_validate_bw(
dcn20_split_stream_for_mpc(
&context->res_ctx, dc->res_pool,
pipe, hsplit_pipe);
- if (!resource_build_scaling_params(pipe) || !resource_build_scaling_params(hsplit_pipe))
- goto validate_fail;
+ resource_build_scaling_params(pipe);
+ resource_build_scaling_params(hsplit_pipe);
}
pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
}
@@ -4053,8 +4126,12 @@ static bool dcn20_resource_construct(
// to be consumed. We could have created dcn20_init_hw to get
// the same effect by checking ASIC rev, but there was a
// request at some point to not check ASIC rev on hw sequencer.
- if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev))
+ if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {
dc->hwseq->funcs.enable_power_gating_plane = NULL;
+ dc->debug.disable_dpp_power_gate = true;
+ dc->debug.disable_hubp_power_gate = true;
+ }
+
dc->caps.max_planes = pool->base.pipe_count;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index 960a0716dde5..f9045852728f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -225,116 +225,6 @@ void hubp21_set_viewport(
SEC_VIEWPORT_Y_START_C, viewport_c->y);
}
-static void hubp21_apply_PLAT_54186_wa(
- struct hubp *hubp,
- const struct dc_plane_address *address)
-{
- struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
- struct dc_debug_options *debug = &hubp->ctx->dc->debug;
- unsigned int chroma_bpe = 2;
- unsigned int luma_addr_high_part = 0;
- unsigned int row_height = 0;
- unsigned int chroma_pitch = 0;
- unsigned int viewport_c_height = 0;
- unsigned int viewport_c_width = 0;
- unsigned int patched_viewport_height = 0;
- unsigned int patched_viewport_width = 0;
- unsigned int rotation_angle = 0;
- unsigned int pix_format = 0;
- unsigned int h_mirror_en = 0;
- unsigned int tile_blk_size = 64 * 1024; /* 64KB for 64KB SW, 4KB for 4KB SW */
-
-
- if (!debug->nv12_iflip_vm_wa)
- return;
-
- REG_GET(DCHUBP_REQ_SIZE_CONFIG_C,
- PTE_ROW_HEIGHT_LINEAR_C, &row_height);
-
- REG_GET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C,
- PRI_VIEWPORT_WIDTH_C, &viewport_c_width,
- PRI_VIEWPORT_HEIGHT_C, &viewport_c_height);
-
- REG_GET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C,
- PRIMARY_SURFACE_ADDRESS_HIGH_C, &luma_addr_high_part);
-
- REG_GET(DCSURF_SURFACE_PITCH_C,
- PITCH_C, &chroma_pitch);
-
- chroma_pitch += 1;
-
- REG_GET_3(DCSURF_SURFACE_CONFIG,
- SURFACE_PIXEL_FORMAT, &pix_format,
- ROTATION_ANGLE, &rotation_angle,
- H_MIRROR_EN, &h_mirror_en);
-
- /* reset persistent cached data */
- hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
- /* apply wa only for NV12 surface with scatter gather enabled with viewport > 512 along
- * the vertical direction*/
- if (address->type != PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
- address->video_progressive.luma_addr.high_part == 0xf4)
- return;
-
- if ((rotation_angle == ROTATION_ANGLE_0 || rotation_angle == ROTATION_ANGLE_180)
- && viewport_c_height <= 512)
- return;
-
- if ((rotation_angle == ROTATION_ANGLE_90 || rotation_angle == ROTATION_ANGLE_270)
- && viewport_c_width <= 512)
- return;
-
- switch (rotation_angle) {
- case ROTATION_ANGLE_0: /* 0 degree rotation */
- row_height = 128;
- patched_viewport_height = (viewport_c_height / row_height + 1) * row_height + 1;
- patched_viewport_width = viewport_c_width;
- hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
- break;
- case ROTATION_ANGLE_180: /* 180 degree rotation */
- row_height = 128;
- patched_viewport_height = viewport_c_height + row_height;
- patched_viewport_width = viewport_c_width;
- hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - chroma_pitch * row_height * chroma_bpe;
- break;
- case ROTATION_ANGLE_90: /* 90 degree rotation */
- row_height = 256;
- if (h_mirror_en) {
- patched_viewport_height = viewport_c_height;
- patched_viewport_width = viewport_c_width + row_height;
- hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
- } else {
- patched_viewport_height = viewport_c_height;
- patched_viewport_width = viewport_c_width + row_height;
- hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - tile_blk_size;
- }
- break;
- case ROTATION_ANGLE_270: /* 270 degree rotation */
- row_height = 256;
- if (h_mirror_en) {
- patched_viewport_height = viewport_c_height;
- patched_viewport_width = viewport_c_width + row_height;
- hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - tile_blk_size;
- } else {
- patched_viewport_height = viewport_c_height;
- patched_viewport_width = viewport_c_width + row_height;
- hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
- }
- break;
- default:
- ASSERT(0);
- break;
- }
-
- /* catch cases where viewport keep growing */
- ASSERT(patched_viewport_height && patched_viewport_height < 5000);
- ASSERT(patched_viewport_width && patched_viewport_width < 5000);
-
- REG_UPDATE_2(DCSURF_PRI_VIEWPORT_DIMENSION_C,
- PRI_VIEWPORT_WIDTH_C, patched_viewport_width,
- PRI_VIEWPORT_HEIGHT_C, patched_viewport_height);
-}
-
void hubp21_set_vm_system_aperture_settings(struct hubp *hubp,
struct vm_system_aperture_param *apt)
{
@@ -812,8 +702,6 @@ bool hubp21_program_surface_flip_and_addr(
const struct dc_plane_address *address,
bool flip_immediate)
{
- struct dc_debug_options *debug = &hubp->ctx->dc->debug;
- struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
struct surface_flip_registers flip_regs = { 0 };
flip_regs.vmid = address->vmid;
@@ -859,12 +747,8 @@ bool hubp21_program_surface_flip_and_addr(
flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH =
address->video_progressive.luma_addr.high_part;
- if (debug->nv12_iflip_vm_wa) {
- flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_C =
- address->video_progressive.chroma_addr.low_part + hubp21->PLAT_54186_wa_chroma_addr_offset;
- } else
- flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_C =
- address->video_progressive.chroma_addr.low_part;
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_C =
+ address->video_progressive.chroma_addr.low_part;
flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C =
address->video_progressive.chroma_addr.high_part;
@@ -942,7 +826,6 @@ static struct hubp_funcs dcn21_hubp_funcs = {
.set_blank = hubp1_set_blank,
.dcc_control = hubp1_dcc_control,
.mem_program_viewport = hubp21_set_viewport,
- .apply_PLAT_54186_wa = hubp21_apply_PLAT_54186_wa,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp1_cursor_set_position,
.hubp_clk_cntl = hubp1_clk_cntl,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
index ada65b1a7eb1..01f1d3d9a639 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -28,10 +28,13 @@
#include "core_types.h"
#include "resource.h"
#include "dce/dce_hwseq.h"
+#include "dce110/dce110_hw_sequencer.h"
#include "dcn21_hwseq.h"
#include "vmid.h"
#include "reg_helper.h"
#include "hw/clk_mgr.h"
+#include "dc_dmub_srv.h"
+#include "abm.h"
#define DC_LOGGER_INIT(logger)
@@ -134,3 +137,89 @@ void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx)
pipe_ctx->stream->dpms_off = true;
}
+static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+ uint32_t ramping_boundary = 0xFFFF;
+
+ cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE;
+ cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst;
+ cmd.abm_set_pipe.abm_set_pipe_data.set_pipe_option = option;
+ cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst;
+ cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
+ cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ return true;
+}
+
+void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
+{
+ struct abm *abm = pipe_ctx->stream_res.abm;
+ uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
+ struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
+
+ struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu;
+
+ if (dmcu) {
+ dce110_set_abm_immediate_disable(pipe_ctx);
+ return;
+ }
+
+ if (abm && panel_cntl)
+ dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE,
+ panel_cntl->inst);
+}
+
+void dcn21_set_pipe(struct pipe_ctx *pipe_ctx)
+{
+ struct abm *abm = pipe_ctx->stream_res.abm;
+ uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
+ struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
+ struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu;
+
+ if (dmcu) {
+ dce110_set_pipe(pipe_ctx);
+ return;
+ }
+
+ if (abm && panel_cntl)
+ dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+}
+
+bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = pipe_ctx->stream->ctx;
+ struct abm *abm = pipe_ctx->stream_res.abm;
+ uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
+ struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
+
+ if (dc->dc->res_pool->dmcu) {
+ dce110_set_backlight_level(pipe_ctx, backlight_pwm_u16_16, frame_ramp);
+ return true;
+ }
+
+ if (abm && panel_cntl)
+ dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+
+ cmd.abm_set_backlight.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
+ cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp;
+ cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_pwm_u16_16;
+ cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ return true;
+}
+
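For illustration only (not part of this patch), the new helpers are reached through the hw sequencer function table, which the dcn21_init.c hunk below repoints at them; a hypothetical caller could look like this, where the function name and brightness value are invented for the example:

static void example_backlight_update(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	/* Hypothetical level: 0x8000 is 0.5 in unsigned 16.16 fixed point (~50%). */
	uint32_t backlight_pwm_u16_16 = 0x8000;

	/* On DCN2.1 without a DMCU this resolves to dcn21_set_backlight_level()
	 * above and is sent as a DMUB_CMD__ABM_SET_BACKLIGHT command; ASICs with
	 * a DMCU keep the existing dce110_* path. */
	if (dc->hwss.set_backlight_level)
		dc->hwss.set_backlight_level(pipe_ctx, backlight_pwm_u16_16, 0 /* no ramp */);
}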
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
index 26bf24d3b59f..9e97747e57cd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
@@ -47,4 +47,10 @@ void dcn21_optimize_pwr_state(
void dcn21_PLAT_58856_wa(struct dc_state *context,
struct pipe_ctx *pipe_ctx);
+void dcn21_set_pipe(struct pipe_ctx *pipe_ctx);
+void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx);
+bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp);
+
#endif /* __DC_HWSS_DCN21_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index a5baef7e7a7d..8575de1a8ad2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -88,8 +88,9 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
.power_down = dce110_power_down,
- .set_backlight_level = dce110_set_backlight_level,
- .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
+ .set_backlight_level = dcn21_set_backlight_level,
+ .set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
+ .set_pipe = dcn21_set_pipe,
};
static const struct hwseq_private_funcs dcn21_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index f00a56835084..61b337267a72 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -88,7 +88,6 @@
#include "dce/dmub_psr.h"
#include "dce/dmub_abm.h"
-#define SOC_BOUNDING_BOX_VALID false
#define DC_LOGGER_INIT(logger)
@@ -878,7 +877,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.scl_reset_length10 = true,
.sanity_checks = true,
.disable_48mhz_pwrdwn = false,
- .nv12_iflip_vm_wa = true,
.usbc_combo_phy_reset_wa = true
};
@@ -1908,6 +1906,8 @@ static bool dcn21_resource_construct(
BREAK_TO_DEBUGGER();
goto create_fail;
}
+
+ dc->debug.dmub_command_table = false;
}
if (dc->config.disable_dmcu) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
new file mode 100644
index 000000000000..025637a83c3b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
@@ -0,0 +1,54 @@
+#
+# Copyright 2020 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Authors: AMD
+#
+#
+
+
+DCN30 = dcn30_init.o dcn30_hubbub.o dcn30_hubp.o dcn30_dpp.o dcn30_optc.o \
+ dcn30_dccg.o dcn30_hwseq.o dcn30_mpc.o dcn30_vpg.o \
+ dcn30_afmt.o dcn30_dio_stream_encoder.o dcn30_dwb.o \
+ dcn30_dpp_cm.o dcn30_dwb_cm.o dcn30_cm_common.o dcn30_mmhubbub.o \
+ dcn30_dio_link_encoder.o dcn30_resource.o
+
+
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -msse -mpreferred-stack-boundary=4
+
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -msse
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
+
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o += -mpreferred-stack-boundary=4
+else
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o += -msse2
+endif
+
+AMD_DAL_DCN30 = $(addprefix $(AMDDALPATH)/dc/dcn30/,$(DCN30))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCN30)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c
new file mode 100644
index 000000000000..2b08b1d72177
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dc_bios_types.h"
+#include "dcn30_afmt.h"
+#include "reg_helper.h"
+
+#define DC_LOGGER \
+ afmt3->base.ctx->logger
+
+#define REG(reg)\
+ (afmt3->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ afmt3->afmt_shift->field_name, afmt3->afmt_mask->field_name
+
+
+#define CTX \
+ afmt3->base.ctx
+
+
+static void afmt3_setup_hdmi_audio(
+ struct afmt *afmt)
+{
+ struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
+
+ /* AFMT_AUDIO_PACKET_CONTROL */
+ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
+
+ /* AFMT_AUDIO_PACKET_CONTROL2 */
+ REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2,
+ AFMT_AUDIO_LAYOUT_OVRD, 0,
+ AFMT_60958_OSF_OVRD, 0);
+
+ /* AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK &
+ * AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK
+ */
+ REG_UPDATE_2(AFMT_60958_0,
+ AFMT_60958_CS_CHANNEL_NUMBER_L, 1,
+ AFMT_60958_CS_CLOCK_ACCURACY, 0);
+
+	/* AFMT_60958_1 AFMT_60958_CS_CHANNEL_NUMBER_R */
+ REG_UPDATE(AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
+
+	/* AFMT_60958_2: keep these settings until the
+	 * programming guide comes out
+ */
+ REG_UPDATE_6(AFMT_60958_2,
+ AFMT_60958_CS_CHANNEL_NUMBER_2, 3,
+ AFMT_60958_CS_CHANNEL_NUMBER_3, 4,
+ AFMT_60958_CS_CHANNEL_NUMBER_4, 5,
+ AFMT_60958_CS_CHANNEL_NUMBER_5, 6,
+ AFMT_60958_CS_CHANNEL_NUMBER_6, 7,
+ AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
+}
+
+static union audio_cea_channels speakers_to_channels(
+ struct audio_speaker_flags speaker_flags)
+{
+ union audio_cea_channels cea_channels = {0};
+
+ /* these are one to one */
+ cea_channels.channels.FL = speaker_flags.FL_FR;
+ cea_channels.channels.FR = speaker_flags.FL_FR;
+ cea_channels.channels.LFE = speaker_flags.LFE;
+ cea_channels.channels.FC = speaker_flags.FC;
+
+	/* if Rear Left and Right exist, move the RC speaker to channel 7;
+ * otherwise to channel 5
+ */
+ if (speaker_flags.RL_RR) {
+ cea_channels.channels.RL_RC = speaker_flags.RL_RR;
+ cea_channels.channels.RR = speaker_flags.RL_RR;
+ cea_channels.channels.RC_RLC_FLC = speaker_flags.RC;
+ } else {
+ cea_channels.channels.RL_RC = speaker_flags.RC;
+ }
+
+	/* FRONT Left Right Center and REAR Left Right Center are mutually exclusive */
+ if (speaker_flags.FLC_FRC) {
+ cea_channels.channels.RC_RLC_FLC = speaker_flags.FLC_FRC;
+ cea_channels.channels.RRC_FRC = speaker_flags.FLC_FRC;
+ } else {
+ cea_channels.channels.RC_RLC_FLC = speaker_flags.RLC_RRC;
+ cea_channels.channels.RRC_FRC = speaker_flags.RLC_RRC;
+ }
+
+ return cea_channels;
+}
+
+static void afmt3_se_audio_setup(
+ struct afmt *afmt,
+ unsigned int az_inst,
+ struct audio_info *audio_info)
+{
+ struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
+
+ uint32_t speakers = 0;
+ uint32_t channels = 0;
+
+ ASSERT(audio_info);
+	/* This should not happen; if it does, bail so we don't get a BSOD */
+ if (audio_info == NULL)
+ return;
+
+ speakers = audio_info->flags.info.ALLSPEAKERS;
+ channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
+
+ /* setup the audio stream source select (audio -> dig mapping) */
+ REG_SET(AFMT_AUDIO_SRC_CONTROL, 0, AFMT_AUDIO_SRC_SELECT, az_inst);
+
+ /* Channel allocation */
+ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, channels);
+
+ /* Disable forced mem power off */
+ REG_UPDATE(AFMT_MEM_PWR, AFMT_MEM_PWR_FORCE, 0);
+}
+
+static void afmt3_audio_mute_control(
+ struct afmt *afmt,
+ bool mute)
+{
+ struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
+
+ /* enable/disable transmission of audio packets */
+ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, !mute);
+}
+
+static void afmt3_audio_info_immediate_update(
+ struct afmt *afmt)
+{
+ struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
+
+ /* update double-buffered AUDIO_INFO registers immediately */
+ REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
+}
+
+static void afmt3_setup_dp_audio(
+ struct afmt *afmt)
+{
+ struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
+
+ /* AFMT_AUDIO_PACKET_CONTROL */
+ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
+
+ /* AFMT_AUDIO_PACKET_CONTROL2 */
+ /* Program the ATP and AIP next */
+ REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2,
+ AFMT_AUDIO_LAYOUT_OVRD, 0,
+ AFMT_60958_OSF_OVRD, 0);
+
+ /* AFMT_INFOFRAME_CONTROL0 */
+ REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
+
+ /* AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK */
+ REG_UPDATE(AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, 0);
+}
+
+static struct afmt_funcs dcn30_afmt_funcs = {
+ .setup_hdmi_audio = afmt3_setup_hdmi_audio,
+ .se_audio_setup = afmt3_se_audio_setup,
+ .audio_mute_control = afmt3_audio_mute_control,
+ .audio_info_immediate_update = afmt3_audio_info_immediate_update,
+ .setup_dp_audio = afmt3_setup_dp_audio,
+};
+
+void afmt3_construct(struct dcn30_afmt *afmt3,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn30_afmt_registers *afmt_regs,
+ const struct dcn30_afmt_shift *afmt_shift,
+ const struct dcn30_afmt_mask *afmt_mask)
+{
+ afmt3->base.ctx = ctx;
+
+ afmt3->base.inst = inst;
+ afmt3->base.funcs = &dcn30_afmt_funcs;
+
+ afmt3->regs = afmt_regs;
+ afmt3->afmt_shift = afmt_shift;
+ afmt3->afmt_mask = afmt_mask;
+}
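afmt3_construct() only wires the vtable and the register/shift/mask tables into a caller-allocated dcn30_afmt; it does no allocation itself. A hedged sketch of the expected call site in a resource file (dcn30_afmt_create, afmt_regs, afmt_shift and afmt_mask are illustrative names, not part of this patch):

	static struct afmt *dcn30_afmt_create(struct dc_context *ctx, uint32_t inst)
	{
		struct dcn30_afmt *afmt3 = kzalloc(sizeof(*afmt3), GFP_KERNEL);

		if (!afmt3)
			return NULL;

		/* per-instance register table plus shared shift/mask tables (assumed) */
		afmt3_construct(afmt3, ctx, inst,
				&afmt_regs[inst], &afmt_shift, &afmt_mask);

		return &afmt3->base;
	}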
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h
new file mode 100644
index 000000000000..08b2d8a8170c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCN30_AFMT_H__
+#define __DAL_DCN30_AFMT_H__
+
+
+#define DCN30_AFMT_FROM_AFMT(afmt)\
+ container_of(afmt, struct dcn30_afmt, base)
+
+#define AFMT_DCN3_REG_LIST(id) \
+ SRI(AFMT_INFOFRAME_CONTROL0, AFMT, id), \
+ SRI(AFMT_VBI_PACKET_CONTROL, AFMT, id), \
+ SRI(AFMT_AUDIO_PACKET_CONTROL, AFMT, id), \
+ SRI(AFMT_AUDIO_PACKET_CONTROL2, AFMT, id), \
+ SRI(AFMT_AUDIO_SRC_CONTROL, AFMT, id), \
+ SRI(AFMT_60958_0, AFMT, id), \
+ SRI(AFMT_60958_1, AFMT, id), \
+ SRI(AFMT_60958_2, AFMT, id), \
+ SRI(AFMT_MEM_PWR, AFMT, id)
+
+struct dcn30_afmt_registers {
+ uint32_t AFMT_INFOFRAME_CONTROL0;
+ uint32_t AFMT_VBI_PACKET_CONTROL;
+ uint32_t AFMT_AUDIO_PACKET_CONTROL;
+ uint32_t AFMT_AUDIO_PACKET_CONTROL2;
+ uint32_t AFMT_AUDIO_SRC_CONTROL;
+ uint32_t AFMT_60958_0;
+ uint32_t AFMT_60958_1;
+ uint32_t AFMT_60958_2;
+ uint32_t AFMT_MEM_PWR;
+};
+
+#define DCN3_AFMT_MASK_SH_LIST(mask_sh)\
+ SE_SF(AFMT0_AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_LAYOUT_OVRD, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_60958_OSF_OVRD, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, mask_sh),\
+ SE_SF(AFMT0_AFMT_MEM_PWR, AFMT_MEM_PWR_FORCE, mask_sh)
+
+#define AFMT_DCN3_REG_FIELD_LIST(type) \
+ type AFMT_AUDIO_INFO_UPDATE;\
+ type AFMT_AUDIO_SRC_SELECT;\
+ type AFMT_AUDIO_CHANNEL_ENABLE;\
+ type AFMT_60958_CS_UPDATE;\
+ type AFMT_AUDIO_LAYOUT_OVRD;\
+ type AFMT_60958_OSF_OVRD;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_L;\
+ type AFMT_60958_CS_CLOCK_ACCURACY;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_R;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_2;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_3;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_4;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_5;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_6;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_7;\
+ type AFMT_AUDIO_SAMPLE_SEND;\
+ type AFMT_MEM_PWR_FORCE
+
+struct dcn30_afmt_shift {
+ AFMT_DCN3_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn30_afmt_mask {
+ AFMT_DCN3_REG_FIELD_LIST(uint32_t);
+};
+
+
+/**
+* speakers_to_channels
+*
+* @brief
+* translate speakers to channels
+*
+* FL - Front Left
+* FR - Front Right
+* RL - Rear Left
+* RR - Rear Right
+* RC - Rear Center
+* FC - Front Center
+* FLC - Front Left Center
+* FRC - Front Right Center
+* RLC - Rear Left Center
+* RRC - Rear Right Center
+* LFE - Low Freq Effect
+*
+* FC
+* FLC FRC
+* FL FR
+*
+* LFE
+* ()
+*
+*
+* RL RR
+* RLC RRC
+* RC
+*
+* ch 8 7 6 5 4 3 2 1
+* 0b00000011 - - - - - - FR FL
+* 0b00000111 - - - - - LFE FR FL
+* 0b00001011 - - - - FC - FR FL
+* 0b00001111 - - - - FC LFE FR FL
+* 0b00010011 - - - RC - - FR FL
+* 0b00010111 - - - RC - LFE FR FL
+* 0b00011011 - - - RC FC - FR FL
+* 0b00011111 - - - RC FC LFE FR FL
+* 0b00110011 - - RR RL - - FR FL
+* 0b00110111 - - RR RL - LFE FR FL
+* 0b00111011 - - RR RL FC - FR FL
+* 0b00111111 - - RR RL FC LFE FR FL
+* 0b01110011 - RC RR RL - - FR FL
+* 0b01110111 - RC RR RL - LFE FR FL
+* 0b01111011 - RC RR RL FC - FR FL
+* 0b01111111 - RC RR RL FC LFE FR FL
+* 0b11110011 RRC RLC RR RL - - FR FL
+* 0b11110111 RRC RLC RR RL - LFE FR FL
+* 0b11111011 RRC RLC RR RL FC - FR FL
+* 0b11111111 RRC RLC RR RL FC LFE FR FL
+* 0b11000011 FRC FLC - - - - FR FL
+* 0b11000111 FRC FLC - - - LFE FR FL
+* 0b11001011 FRC FLC - - FC - FR FL
+* 0b11001111 FRC FLC - - FC LFE FR FL
+* 0b11010011 FRC FLC - RC - - FR FL
+* 0b11010111 FRC FLC - RC - LFE FR FL
+* 0b11011011 FRC FLC - RC FC - FR FL
+* 0b11011111 FRC FLC - RC FC LFE FR FL
+* 0b11110011 FRC FLC RR RL - - FR FL
+* 0b11110111 FRC FLC RR RL - LFE FR FL
+* 0b11111011 FRC FLC RR RL FC - FR FL
+* 0b11111111 FRC FLC RR RL FC LFE FR FL
+*
+* @param
+* speakers - speaker information as it comes from CEA audio block
+*/
+/* translate speakers to channels */
+
+union audio_cea_channels {
+ uint8_t all;
+ struct audio_cea_channels_bits {
+ uint32_t FL:1;
+ uint32_t FR:1;
+ uint32_t LFE:1;
+ uint32_t FC:1;
+ uint32_t RL_RC:1;
+ uint32_t RR:1;
+ uint32_t RC_RLC_FLC:1;
+ uint32_t RRC_FRC:1;
+ } channels;
+};
+
+struct afmt;
+
+struct afmt_funcs {
+
+ void (*setup_hdmi_audio)(
+ struct afmt *afmt);
+
+ void (*se_audio_setup)(
+ struct afmt *afmt,
+ unsigned int az_inst,
+ struct audio_info *audio_info);
+
+ void (*audio_mute_control)(
+ struct afmt *afmt,
+ bool mute);
+
+ void (*audio_info_immediate_update)(
+ struct afmt *afmt);
+
+ void (*setup_dp_audio)(
+ struct afmt *afmt);
+};
+
+struct afmt {
+ const struct afmt_funcs *funcs;
+ struct dc_context *ctx;
+ int inst;
+};
+
+struct dcn30_afmt {
+ struct afmt base;
+ const struct dcn30_afmt_registers *regs;
+ const struct dcn30_afmt_shift *afmt_shift;
+ const struct dcn30_afmt_mask *afmt_mask;
+};
+
+void afmt3_construct(struct dcn30_afmt *afmt3,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn30_afmt_registers *afmt_regs,
+ const struct dcn30_afmt_shift *afmt_shift,
+ const struct dcn30_afmt_mask *afmt_mask);
+
+
+#endif
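As a worked example of the speaker-to-channel table documented above: a 5.1 layout (FL_FR, LFE, FC and RL_RR set, everything else clear) enables CEA channels 1-6, i.e. all == 0b00111111, because RL_RR drives both RL_RC and RR while RC_RLC_FLC and RRC_FRC stay 0. Since speakers_to_channels() is static to dcn30_afmt.c, the snippet below only illustrates that mapping, it is not a callable API elsewhere:

	/* illustrative only: a 5.1 speaker allocation fed through the mapping above */
	struct audio_speaker_flags flags_5_1 = {
		.FL_FR = 1, .LFE = 1, .FC = 1, .RL_RR = 1,
	};
	union audio_cea_channels ch = speakers_to_channels(flags_5_1);
	/* ch.all == 0x3f: FL, FR, LFE, FC, RL_RC and RR enabled */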
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
new file mode 100644
index 000000000000..a139a87a1a81
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
@@ -0,0 +1,640 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "core_types.h"
+#include "reg_helper.h"
+#include "dcn30_dpp.h"
+#include "basics/conversion.h"
+#include "dcn30_cm_common.h"
+#include "custom_float.h"
+
+#define REG(reg) reg
+
+#define CTX \
+ ctx //dpp->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ reg->shifts.field_name, reg->masks.field_name
+
+void cm_helper_program_gamcor_xfer_func(
+ struct dc_context *ctx,
+ const struct pwl_params *params,
+ const struct dcn3_xfer_func_reg *reg)
+{
+ uint32_t reg_region_cur;
+ unsigned int i = 0;
+
+ REG_SET_2(reg->start_cntl_b, 0,
+ exp_region_start, params->corner_points[0].blue.custom_float_x,
+ exp_resion_start_segment, 0);
+ REG_SET_2(reg->start_cntl_g, 0,
+ exp_region_start, params->corner_points[0].green.custom_float_x,
+ exp_resion_start_segment, 0);
+ REG_SET_2(reg->start_cntl_r, 0,
+ exp_region_start, params->corner_points[0].red.custom_float_x,
+ exp_resion_start_segment, 0);
+
+ REG_SET(reg->start_slope_cntl_b, 0, //linear slope at start of curve
+ field_region_linear_slope, params->corner_points[0].blue.custom_float_slope);
+ REG_SET(reg->start_slope_cntl_g, 0,
+ field_region_linear_slope, params->corner_points[0].green.custom_float_slope);
+ REG_SET(reg->start_slope_cntl_r, 0,
+ field_region_linear_slope, params->corner_points[0].red.custom_float_slope);
+
+ REG_SET(reg->start_end_cntl1_b, 0,
+ field_region_end_base, params->corner_points[1].blue.custom_float_y);
+ REG_SET(reg->start_end_cntl1_g, 0,
+ field_region_end_base, params->corner_points[1].green.custom_float_y);
+ REG_SET(reg->start_end_cntl1_r, 0,
+ field_region_end_base, params->corner_points[1].red.custom_float_y);
+
+ REG_SET_2(reg->start_end_cntl2_b, 0,
+ field_region_end_slope, params->corner_points[1].blue.custom_float_slope,
+ field_region_end, params->corner_points[1].blue.custom_float_x);
+ REG_SET_2(reg->start_end_cntl2_g, 0,
+ field_region_end_slope, params->corner_points[1].green.custom_float_slope,
+ field_region_end, params->corner_points[1].green.custom_float_x);
+ REG_SET_2(reg->start_end_cntl2_r, 0,
+ field_region_end_slope, params->corner_points[1].red.custom_float_slope,
+ field_region_end, params->corner_points[1].red.custom_float_x);
+
+ for (reg_region_cur = reg->region_start;
+ reg_region_cur <= reg->region_end;
+ reg_region_cur++) {
+
+ const struct gamma_curve *curve0 = &(params->arr_curve_points[2 * i]);
+ const struct gamma_curve *curve1 = &(params->arr_curve_points[(2 * i) + 1]);
+
+ REG_SET_4(reg_region_cur, 0,
+ exp_region0_lut_offset, curve0->offset,
+ exp_region0_num_segments, curve0->segments_num,
+ exp_region1_lut_offset, curve1->offset,
+ exp_region1_num_segments, curve1->segments_num);
+
+ i++;
+ }
+}
+
+/* driver uses 32 regions or fewer, but DCN HW has 34; the extra 2 are set to 0 */
+#define MAX_REGIONS_NUMBER 34
+#define MAX_LOW_POINT 25
+#define NUMBER_REGIONS 32
+#define NUMBER_SW_SEGMENTS 16
+
+bool cm3_helper_translate_curve_to_hw_format(
+ const struct dc_transfer_func *output_tf,
+ struct pwl_params *lut_params, bool fixpoint)
+{
+ struct curve_points3 *corner_points;
+ struct pwl_result_data *rgb_resulted;
+ struct pwl_result_data *rgb;
+ struct pwl_result_data *rgb_plus_1;
+ struct fixed31_32 end_value;
+
+ int32_t region_start, region_end;
+ int32_t i;
+ uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points;
+
+ if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
+ return false;
+
+ PERF_TRACE_CTX(output_tf->ctx);
+
+ corner_points = lut_params->corner_points;
+ rgb_resulted = lut_params->rgb_resulted;
+ hw_points = 0;
+
+ memset(lut_params, 0, sizeof(struct pwl_params));
+ memset(seg_distr, 0, sizeof(seg_distr));
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ || output_tf->tf == TRANSFER_FUNCTION_GAMMA22 ||
+ output_tf->tf == TRANSFER_FUNCTION_HLG) {
+ /* 32 segments
+ * segments are from 2^-25 to 2^7
+ */
+ for (i = 0; i < NUMBER_REGIONS ; i++)
+ seg_distr[i] = 3;
+
+ region_start = -MAX_LOW_POINT;
+ region_end = NUMBER_REGIONS - MAX_LOW_POINT;
+ } else {
+ /* 10 segments
+		 * segments are from 2^-10 to 2^0
+		 * There are fewer than 256 points, for optimization
+ */
+ seg_distr[0] = 3;
+ seg_distr[1] = 4;
+ seg_distr[2] = 4;
+ seg_distr[3] = 4;
+ seg_distr[4] = 4;
+ seg_distr[5] = 4;
+ seg_distr[6] = 4;
+ seg_distr[7] = 4;
+ seg_distr[8] = 4;
+ seg_distr[9] = 4;
+
+ region_start = -10;
+ region_end = 0;
+ }
+
+ for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
+ seg_distr[i] = -1;
+
+ for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
+ if (seg_distr[k] != -1)
+ hw_points += (1 << seg_distr[k]);
+ }
+
+ j = 0;
+ for (k = 0; k < (region_end - region_start); k++) {
+ increment = NUMBER_SW_SEGMENTS / (1 << seg_distr[k]);
+ start_index = (region_start + k + MAX_LOW_POINT) *
+ NUMBER_SW_SEGMENTS;
+ for (i = start_index; i < start_index + NUMBER_SW_SEGMENTS;
+ i += increment) {
+ if (j == hw_points - 1)
+ break;
+ rgb_resulted[j].red = output_tf->tf_pts.red[i];
+ rgb_resulted[j].green = output_tf->tf_pts.green[i];
+ rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
+ j++;
+ }
+ }
+
+ /* last point */
+ start_index = (region_end + MAX_LOW_POINT) * NUMBER_SW_SEGMENTS;
+ rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index];
+ rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
+ rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
+
+ // All 3 color channels have same x
+ corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
+ dc_fixpt_from_int(region_start));
+ corner_points[0].green.x = corner_points[0].red.x;
+ corner_points[0].blue.x = corner_points[0].red.x;
+
+ corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
+ dc_fixpt_from_int(region_end));
+ corner_points[1].green.x = corner_points[1].red.x;
+ corner_points[1].blue.x = corner_points[1].red.x;
+
+ corner_points[0].red.y = rgb_resulted[0].red;
+ corner_points[0].green.y = rgb_resulted[0].green;
+ corner_points[0].blue.y = rgb_resulted[0].blue;
+
+ corner_points[0].red.slope = dc_fixpt_div(corner_points[0].red.y,
+ corner_points[0].red.x);
+ corner_points[0].green.slope = dc_fixpt_div(corner_points[0].green.y,
+ corner_points[0].green.x);
+ corner_points[0].blue.slope = dc_fixpt_div(corner_points[0].blue.y,
+ corner_points[0].blue.x);
+
+ /* see comment above, m_arrPoints[1].y should be the Y value for the
+ * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
+ */
+ corner_points[1].red.y = rgb_resulted[hw_points - 1].red;
+ corner_points[1].green.y = rgb_resulted[hw_points - 1].green;
+ corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue;
+ corner_points[1].red.slope = dc_fixpt_zero;
+ corner_points[1].green.slope = dc_fixpt_zero;
+ corner_points[1].blue.slope = dc_fixpt_zero;
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ || output_tf->tf == TRANSFER_FUNCTION_HLG) {
+ /* for PQ/HLG, we want to have a straight line from last HW X point,
+ * and the slope to be such that we hit 1.0 at 10000/1000 nits.
+ */
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ)
+ end_value = dc_fixpt_from_int(125);
+ else
+ end_value = dc_fixpt_from_fraction(125, 10);
+
+ corner_points[1].red.slope = dc_fixpt_div(
+ dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y),
+ dc_fixpt_sub(end_value, corner_points[1].red.x));
+ corner_points[1].green.slope = dc_fixpt_div(
+ dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y),
+ dc_fixpt_sub(end_value, corner_points[1].green.x));
+ corner_points[1].blue.slope = dc_fixpt_div(
+ dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y),
+ dc_fixpt_sub(end_value, corner_points[1].blue.x));
+ }
+ lut_params->hw_points_num = hw_points;
+
+ k = 0;
+ for (i = 1; i < MAX_REGIONS_NUMBER; i++) {
+ if (seg_distr[k] != -1) {
+ lut_params->arr_curve_points[k].segments_num =
+ seg_distr[k];
+ lut_params->arr_curve_points[i].offset =
+ lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
+ }
+ k++;
+ }
+
+ if (seg_distr[k] != -1)
+ lut_params->arr_curve_points[k].segments_num = seg_distr[k];
+
+ rgb = rgb_resulted;
+ rgb_plus_1 = rgb_resulted + 1;
+
+ i = 1;
+ while (i != hw_points + 1) {
+ if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
+ rgb_plus_1->red = rgb->red;
+ if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
+ rgb_plus_1->green = rgb->green;
+ if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
+ rgb_plus_1->blue = rgb->blue;
+
+ rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
+ rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
+ rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
+
+ if (fixpoint == true) {
+ rgb->delta_red_reg = dc_fixpt_clamp_u0d10(rgb->delta_red);
+ rgb->delta_green_reg = dc_fixpt_clamp_u0d10(rgb->delta_green);
+ rgb->delta_blue_reg = dc_fixpt_clamp_u0d10(rgb->delta_blue);
+ rgb->red_reg = dc_fixpt_clamp_u0d14(rgb->red);
+ rgb->green_reg = dc_fixpt_clamp_u0d14(rgb->green);
+ rgb->blue_reg = dc_fixpt_clamp_u0d14(rgb->blue);
+ }
+
+ ++rgb_plus_1;
+ ++rgb;
+ ++i;
+ }
+ cm3_helper_convert_to_custom_float(rgb_resulted,
+ lut_params->corner_points,
+ hw_points, fixpoint);
+
+ return true;
+}
+
+#define NUM_DEGAMMA_REGIONS 12
+
+
+bool cm3_helper_translate_curve_to_degamma_hw_format(
+ const struct dc_transfer_func *output_tf,
+ struct pwl_params *lut_params)
+{
+ struct curve_points3 *corner_points;
+ struct pwl_result_data *rgb_resulted;
+ struct pwl_result_data *rgb;
+ struct pwl_result_data *rgb_plus_1;
+
+ int32_t region_start, region_end;
+ int32_t i;
+ uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points;
+
+ if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
+ return false;
+
+ PERF_TRACE_CTX(output_tf->ctx);
+
+ corner_points = lut_params->corner_points;
+ rgb_resulted = lut_params->rgb_resulted;
+ hw_points = 0;
+
+ memset(lut_params, 0, sizeof(struct pwl_params));
+ memset(seg_distr, 0, sizeof(seg_distr));
+
+ region_start = -NUM_DEGAMMA_REGIONS;
+ region_end = 0;
+
+
+ for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
+ seg_distr[i] = -1;
+ /* 12 segments
+	 * segments are from 2^-12 to 2^0
+ */
+ for (i = 0; i < NUM_DEGAMMA_REGIONS ; i++)
+ seg_distr[i] = 4;
+
+ for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
+ if (seg_distr[k] != -1)
+ hw_points += (1 << seg_distr[k]);
+ }
+
+ j = 0;
+ for (k = 0; k < (region_end - region_start); k++) {
+ increment = NUMBER_SW_SEGMENTS / (1 << seg_distr[k]);
+ start_index = (region_start + k + MAX_LOW_POINT) *
+ NUMBER_SW_SEGMENTS;
+ for (i = start_index; i < start_index + NUMBER_SW_SEGMENTS;
+ i += increment) {
+ if (j == hw_points - 1)
+ break;
+ rgb_resulted[j].red = output_tf->tf_pts.red[i];
+ rgb_resulted[j].green = output_tf->tf_pts.green[i];
+ rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
+ j++;
+ }
+ }
+
+ /* last point */
+ start_index = (region_end + MAX_LOW_POINT) * NUMBER_SW_SEGMENTS;
+ rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index];
+ rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
+ rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
+
+ corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
+ dc_fixpt_from_int(region_start));
+ corner_points[0].green.x = corner_points[0].red.x;
+ corner_points[0].blue.x = corner_points[0].red.x;
+ corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
+ dc_fixpt_from_int(region_end));
+ corner_points[1].green.x = corner_points[1].red.x;
+ corner_points[1].blue.x = corner_points[1].red.x;
+
+ corner_points[0].red.y = rgb_resulted[0].red;
+ corner_points[0].green.y = rgb_resulted[0].green;
+ corner_points[0].blue.y = rgb_resulted[0].blue;
+
+ /* see comment above, m_arrPoints[1].y should be the Y value for the
+ * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
+ */
+ corner_points[1].red.y = rgb_resulted[hw_points - 1].red;
+ corner_points[1].green.y = rgb_resulted[hw_points - 1].green;
+ corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue;
+ corner_points[1].red.slope = dc_fixpt_zero;
+ corner_points[1].green.slope = dc_fixpt_zero;
+ corner_points[1].blue.slope = dc_fixpt_zero;
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* for PQ, we want to have a straight line from last HW X point,
+ * and the slope to be such that we hit 1.0 at 10000 nits.
+ */
+ const struct fixed31_32 end_value =
+ dc_fixpt_from_int(125);
+
+ corner_points[1].red.slope = dc_fixpt_div(
+ dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y),
+ dc_fixpt_sub(end_value, corner_points[1].red.x));
+ corner_points[1].green.slope = dc_fixpt_div(
+ dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y),
+ dc_fixpt_sub(end_value, corner_points[1].green.x));
+ corner_points[1].blue.slope = dc_fixpt_div(
+ dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y),
+ dc_fixpt_sub(end_value, corner_points[1].blue.x));
+ }
+
+ lut_params->hw_points_num = hw_points;
+
+ k = 0;
+ for (i = 1; i < MAX_REGIONS_NUMBER; i++) {
+ if (seg_distr[k] != -1) {
+ lut_params->arr_curve_points[k].segments_num =
+ seg_distr[k];
+ lut_params->arr_curve_points[i].offset =
+ lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
+ }
+ k++;
+ }
+
+ if (seg_distr[k] != -1)
+ lut_params->arr_curve_points[k].segments_num = seg_distr[k];
+
+ rgb = rgb_resulted;
+ rgb_plus_1 = rgb_resulted + 1;
+
+ i = 1;
+ while (i != hw_points + 1) {
+ if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
+ rgb_plus_1->red = rgb->red;
+ if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
+ rgb_plus_1->green = rgb->green;
+ if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
+ rgb_plus_1->blue = rgb->blue;
+
+ rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
+ rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
+ rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
+
+ ++rgb_plus_1;
+ ++rgb;
+ ++i;
+ }
+ cm3_helper_convert_to_custom_float(rgb_resulted,
+ lut_params->corner_points,
+ hw_points, false);
+
+ return true;
+}
+
+bool cm3_helper_convert_to_custom_float(
+ struct pwl_result_data *rgb_resulted,
+ struct curve_points3 *corner_points,
+ uint32_t hw_points_num,
+ bool fixpoint)
+{
+ struct custom_float_format fmt;
+
+ struct pwl_result_data *rgb = rgb_resulted;
+
+ uint32_t i = 0;
+
+ fmt.exponenta_bits = 6;
+ fmt.mantissa_bits = 12;
+ fmt.sign = false;
+
+ /* corner_points[0] - beginning base, slope offset for R,G,B
+ * corner_points[1] - end base, slope offset for R,G,B
+ */
+ if (!convert_to_custom_float_format(corner_points[0].red.x, &fmt,
+ &corner_points[0].red.custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[0].green.x, &fmt,
+ &corner_points[0].green.custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[0].blue.x, &fmt,
+ &corner_points[0].blue.custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(corner_points[0].red.offset, &fmt,
+ &corner_points[0].red.custom_float_offset)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[0].green.offset, &fmt,
+ &corner_points[0].green.custom_float_offset)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[0].blue.offset, &fmt,
+ &corner_points[0].blue.custom_float_offset)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(corner_points[0].red.slope, &fmt,
+ &corner_points[0].red.custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[0].green.slope, &fmt,
+ &corner_points[0].green.custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[0].blue.slope, &fmt,
+ &corner_points[0].blue.custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (fixpoint == true) {
+ corner_points[1].red.custom_float_y =
+ dc_fixpt_clamp_u0d14(corner_points[1].red.y);
+ corner_points[1].green.custom_float_y =
+ dc_fixpt_clamp_u0d14(corner_points[1].green.y);
+ corner_points[1].blue.custom_float_y =
+ dc_fixpt_clamp_u0d14(corner_points[1].blue.y);
+ } else {
+ if (!convert_to_custom_float_format(corner_points[1].red.y,
+ &fmt, &corner_points[1].red.custom_float_y)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[1].green.y,
+ &fmt, &corner_points[1].green.custom_float_y)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[1].blue.y,
+ &fmt, &corner_points[1].blue.custom_float_y)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ }
+
+ fmt.mantissa_bits = 10;
+ fmt.sign = false;
+
+ if (!convert_to_custom_float_format(corner_points[1].red.x, &fmt,
+ &corner_points[1].red.custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[1].green.x, &fmt,
+ &corner_points[1].green.custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[1].blue.x, &fmt,
+ &corner_points[1].blue.custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(corner_points[1].red.slope, &fmt,
+ &corner_points[1].red.custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[1].green.slope, &fmt,
+ &corner_points[1].green.custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[1].blue.slope, &fmt,
+ &corner_points[1].blue.custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (hw_points_num == 0 || rgb_resulted == NULL || fixpoint == true)
+ return true;
+
+ fmt.mantissa_bits = 12;
+
+ while (i != hw_points_num) {
+ if (!convert_to_custom_float_format(rgb->red, &fmt,
+ &rgb->red_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->green, &fmt,
+ &rgb->green_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->blue, &fmt,
+ &rgb->blue_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->delta_red, &fmt,
+ &rgb->delta_red_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->delta_green, &fmt,
+ &rgb->delta_green_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->delta_blue, &fmt,
+ &rgb->delta_blue_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ++rgb;
+ ++i;
+ }
+
+ return true;
+}
+
+bool is_rgb_equal(const struct pwl_result_data *rgb, uint32_t num)
+{
+ uint32_t i;
+ bool ret = true;
+
+ for (i = 0 ; i < num; i++) {
+ if (rgb[i].red_reg != rgb[i].green_reg ||
+ rgb[i].blue_reg != rgb[i].red_reg ||
+ rgb[i].blue_reg != rgb[i].green_reg) {
+ ret = false;
+ break;
+ }
+ }
+ return ret;
+}
+
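To make the region arithmetic in cm3_helper_translate_curve_to_hw_format() concrete: in the PQ/HLG branch every one of the 32 regions gets seg_distr[i] = 3, so each region contributes 2^3 = 8 hardware points, hw_points works out to 32 * 8 = 256, and the curve spans x = 2^-25 (region_start = -MAX_LOW_POINT) through x = 2^7 (region_end = 32 - 25). A small recount under the same constants:

	/* illustrative recount of the PQ/HLG segment distribution handled above */
	unsigned int i, hw_points = 0;
	int seg_distr[NUMBER_REGIONS];

	for (i = 0; i < NUMBER_REGIONS; i++)
		seg_distr[i] = 3;		/* 2^3 = 8 points per region */

	for (i = 0; i < NUMBER_REGIONS; i++)
		hw_points += 1 << seg_distr[i];	/* 32 * 8 = 256 */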
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.h
new file mode 100644
index 000000000000..bd98b327a6c7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn10/dcn10_cm_common.h"
+
+#ifndef __DAL_DCN30_CM_COMMON_H__
+#define __DAL_DCN30_CM_COMMON_H__
+
+#define TF_HELPER_REG_FIELD_LIST_DCN3(type) \
+ TF_HELPER_REG_FIELD_LIST(type);\
+ type field_region_start_base;\
+ type field_offset
+
+struct DCN3_xfer_func_shift {
+ TF_HELPER_REG_FIELD_LIST_DCN3(uint8_t);
+};
+
+struct DCN3_xfer_func_mask {
+ TF_HELPER_REG_FIELD_LIST_DCN3(uint32_t);
+};
+
+struct dcn3_xfer_func_reg {
+ struct DCN3_xfer_func_shift shifts;
+ struct DCN3_xfer_func_mask masks;
+
+ TF_HELPER_REG_LIST;
+ uint32_t offset_b;
+ uint32_t offset_g;
+ uint32_t offset_r;
+ uint32_t start_base_cntl_b;
+ uint32_t start_base_cntl_g;
+ uint32_t start_base_cntl_r;
+};
+
+void cm_helper_program_gamcor_xfer_func(
+ struct dc_context *ctx,
+ const struct pwl_params *params,
+ const struct dcn3_xfer_func_reg *reg);
+
+bool cm3_helper_translate_curve_to_hw_format(
+ const struct dc_transfer_func *output_tf,
+ struct pwl_params *lut_params, bool fixpoint);
+
+bool cm3_helper_translate_curve_to_degamma_hw_format(
+ const struct dc_transfer_func *output_tf,
+ struct pwl_params *lut_params);
+
+bool cm3_helper_convert_to_custom_float(
+ struct pwl_result_data *rgb_resulted,
+ struct curve_points3 *corner_points,
+ uint32_t hw_points_num,
+ bool fixpoint);
+
+bool is_rgb_equal(const struct pwl_result_data *rgb, uint32_t num);
+
+#endif
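The two translate helpers produce a pwl_params that cm_helper_program_gamcor_xfer_func() can then write to the hardware. A hedged sketch of that flow, assuming the caller owns a scratch pwl_params and a filled-in dcn3_xfer_func_reg (example_program_gamcor is a hypothetical wrapper):

	static bool example_program_gamcor(struct dc_context *ctx,
			const struct dc_transfer_func *tf,
			struct pwl_params *scratch,
			const struct dcn3_xfer_func_reg *gam_regs)
	{
		/* translate returns false for bypass or invalid input */
		if (!cm3_helper_translate_curve_to_hw_format(tf, scratch, false))
			return false;

		cm_helper_program_gamcor_xfer_func(ctx, scratch, gam_regs);
		return true;
	}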
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.c
new file mode 100644
index 000000000000..b822a13e40ce
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "core_types.h"
+#include "dcn30_dccg.h"
+
+#define TO_DCN_DCCG(dccg)\
+ container_of(dccg, struct dcn_dccg, base)
+
+#define REG(reg) \
+ (dccg_dcn->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dccg_dcn->dccg_shift->field_name, dccg_dcn->dccg_mask->field_name
+
+#define CTX \
+ dccg_dcn->base.ctx
+#define DC_LOGGER \
+ dccg->ctx->logger
+
+
+static const struct dccg_funcs dccg3_funcs = {
+ .update_dpp_dto = dccg2_update_dpp_dto,
+ .get_dccg_ref_freq = dccg2_get_dccg_ref_freq,
+ .dccg_init = dccg2_init
+};
+
+struct dccg *dccg3_create(
+ struct dc_context *ctx,
+ const struct dccg_registers *regs,
+ const struct dccg_shift *dccg_shift,
+ const struct dccg_mask *dccg_mask)
+{
+ struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
+ struct dccg *base;
+
+ if (dccg_dcn == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ base = &dccg_dcn->base;
+ base->ctx = ctx;
+ base->funcs = &dccg3_funcs;
+
+ dccg_dcn->regs = regs;
+ dccg_dcn->dccg_shift = dccg_shift;
+ dccg_dcn->dccg_mask = dccg_mask;
+
+ return &dccg_dcn->base;
+}
+
+struct dccg *dccg30_create(
+ struct dc_context *ctx,
+ const struct dccg_registers *regs,
+ const struct dccg_shift *dccg_shift,
+ const struct dccg_mask *dccg_mask)
+{
+ struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
+ struct dccg *base;
+
+ if (dccg_dcn == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ base = &dccg_dcn->base;
+ base->ctx = ctx;
+ base->funcs = &dccg3_funcs;
+
+ dccg_dcn->regs = regs;
+ dccg_dcn->dccg_shift = dccg_shift;
+ dccg_dcn->dccg_mask = dccg_mask;
+
+ return &dccg_dcn->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h
new file mode 100644
index 000000000000..029dda13a464
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN30_DCCG_H__
+#define __DCN30_DCCG_H__
+
+#include "dcn20/dcn20_dccg.h"
+
+
+#define DCCG_REG_LIST_DCN3AG() \
+ DCCG_COMMON_REG_LIST_DCN_BASE(),\
+ SR(PHYASYMCLK_CLOCK_CNTL),\
+ SR(PHYBSYMCLK_CLOCK_CNTL),\
+ SR(PHYCSYMCLK_CLOCK_CNTL)
+
+
+#define DCCG_REG_LIST_DCN30() \
+ DCCG_REG_LIST_DCN2(),\
+ SR(PHYASYMCLK_CLOCK_CNTL),\
+ SR(PHYBSYMCLK_CLOCK_CNTL),\
+ SR(PHYCSYMCLK_CLOCK_CNTL)
+
+#define DCCG_MASK_SH_LIST_DCN3(mask_sh) \
+ DCCG_MASK_SH_LIST_DCN2(mask_sh),\
+ DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\
+ DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_SRC_SEL, mask_sh),\
+ DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_SRC_SEL, mask_sh)
+
+struct dccg *dccg3_create(
+ struct dc_context *ctx,
+ const struct dccg_registers *regs,
+ const struct dccg_shift *dccg_shift,
+ const struct dccg_mask *dccg_mask);
+
+struct dccg *dccg30_create(
+ struct dc_context *ctx,
+ const struct dccg_registers *regs,
+ const struct dccg_shift *dccg_shift,
+ const struct dccg_mask *dccg_mask);
+
+#endif //__DCN30_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
new file mode 100644
index 000000000000..c29326e9856a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+
+#include "core_types.h"
+#include "link_encoder.h"
+#include "dcn30_dio_link_encoder.h"
+#include "stream_encoder.h"
+#include "i2caux_interface.h"
+#include "dc_bios_types.h"
+/* #include "dcn3ag/dcn3ag_phy_fw.h" */
+
+#include "gpio_service_interface.h"
+
+#define CTX \
+ enc10->base.ctx
+#define DC_LOGGER \
+ enc10->base.ctx->logger
+
+#define REG(reg)\
+ (enc10->link_regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ enc10->link_shift->field_name, enc10->link_mask->field_name
+
+#define IND_REG(index) \
+ (enc10->link_regs->index)
+
+
+static bool dcn30_link_encoder_validate_output_with_stream(
+ struct link_encoder *enc,
+ const struct dc_stream_state *stream)
+{
+ return dcn10_link_encoder_validate_output_with_stream(enc, stream);
+}
+
+static const struct link_encoder_funcs dcn30_link_enc_funcs = {
+ .read_state = link_enc2_read_state,
+ .validate_output_with_stream =
+ dcn30_link_encoder_validate_output_with_stream,
+ .hw_init = enc2_hw_init,
+ .setup = dcn10_link_encoder_setup,
+ .enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
+ .enable_dp_output = dcn20_link_encoder_enable_dp_output,
+ .enable_dp_mst_output = dcn10_link_encoder_enable_dp_mst_output,
+ .disable_output = dcn10_link_encoder_disable_output,
+ .dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
+ .dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
+ .update_mst_stream_allocation_table =
+ dcn10_link_encoder_update_mst_stream_allocation_table,
+ .psr_program_dp_dphy_fast_training =
+ dcn10_psr_program_dp_dphy_fast_training,
+ .psr_program_secondary_packet = dcn10_psr_program_secondary_packet,
+ .connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe,
+ .enable_hpd = dcn10_link_encoder_enable_hpd,
+ .disable_hpd = dcn10_link_encoder_disable_hpd,
+ .is_dig_enabled = dcn10_is_dig_enabled,
+ .destroy = dcn10_link_encoder_destroy,
+ .fec_set_enable = enc2_fec_set_enable,
+ .fec_set_ready = enc2_fec_set_ready,
+ .fec_is_active = enc2_fec_is_active,
+ .get_dig_frontend = dcn10_get_dig_frontend,
+ .get_dig_mode = dcn10_get_dig_mode,
+ .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
+ .get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
+};
+
+void dcn30_link_encoder_construct(
+ struct dcn20_link_encoder *enc20,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dcn10_link_enc_registers *link_regs,
+ const struct dcn10_link_enc_aux_registers *aux_regs,
+ const struct dcn10_link_enc_hpd_registers *hpd_regs,
+ const struct dcn10_link_enc_shift *link_shift,
+ const struct dcn10_link_enc_mask *link_mask)
+{
+ struct bp_encoder_cap_info bp_cap_info = {0};
+ const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
+ enum bp_result result = BP_RESULT_OK;
+ struct dcn10_link_encoder *enc10 = &enc20->enc10;
+
+ enc10->base.funcs = &dcn30_link_enc_funcs;
+ enc10->base.ctx = init_data->ctx;
+ enc10->base.id = init_data->encoder;
+
+ enc10->base.hpd_source = init_data->hpd_source;
+ enc10->base.connector = init_data->connector;
+
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+
+ enc10->base.features = *enc_features;
+
+ enc10->base.transmitter = init_data->transmitter;
+
+	/* set the flag to indicate whether the driver polls the I2C data pin
+ * while doing the DP sink detect
+ */
+
+/* if (dal_adapter_service_is_feature_supported(as,
+ FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
+ enc10->base.features.flags.bits.
+ DP_SINK_DETECT_POLL_DATA_PIN = true;*/
+
+ enc10->base.output_signals =
+ SIGNAL_TYPE_DVI_SINGLE_LINK |
+ SIGNAL_TYPE_DVI_DUAL_LINK |
+ SIGNAL_TYPE_LVDS |
+ SIGNAL_TYPE_DISPLAY_PORT |
+ SIGNAL_TYPE_DISPLAY_PORT_MST |
+ SIGNAL_TYPE_EDP |
+ SIGNAL_TYPE_HDMI_TYPE_A;
+
+	/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
+	 * SW always assigns DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY.
+	 * SW assigns DIG_FE to non-MST UNIPHY first and MST last, so the
+	 * preferred DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI and LVDS.
+	 * The preferred DIG assignment is decided by board design.
+	 * For DCE 8.0 there are at most 6 UNIPHYs; we assume board design
+	 * and VBIOS will filter out the 7th UNIPHY for DCE 8.0.
+	 * By this, adding DIGG should not hurt DCE 8.0.
+	 * This will let DCE 8.1 share DCE 8.0 as much as possible.
+	 */
+
+ enc10->link_regs = link_regs;
+ enc10->aux_regs = aux_regs;
+ enc10->hpd_regs = hpd_regs;
+ enc10->link_shift = link_shift;
+ enc10->link_mask = link_mask;
+
+ switch (enc10->base.transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ enc10->base.preferred_engine = ENGINE_ID_DIGA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ enc10->base.preferred_engine = ENGINE_ID_DIGB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ enc10->base.preferred_engine = ENGINE_ID_DIGC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ enc10->base.preferred_engine = ENGINE_ID_DIGD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ enc10->base.preferred_engine = ENGINE_ID_DIGE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ enc10->base.preferred_engine = ENGINE_ID_DIGF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ enc10->base.preferred_engine = ENGINE_ID_DIGG;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ }
+
+ /* default to one to mirror Windows behavior */
+ enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
+
+ result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios,
+ enc10->base.id, &bp_cap_info);
+
+ /* Override features with DCE-specific values */
+ if (result == BP_RESULT_OK) {
+ enc10->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+ enc10->base.features.flags.bits.DP_IS_USB_C =
+ bp_cap_info.DP_IS_USB_C;
+ } else {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
+ }
+ if (enc10->base.ctx->dc->debug.hdmi20_disable) {
+ enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
+ }
+}
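dcn30_link_encoder_construct() fills in a caller-allocated dcn20_link_encoder, so the resource code allocates first and constructs second. A hedged sketch of that call site (dcn30_link_encoder_create, link_enc_feature, link_enc_regs, link_enc_aux_regs, link_enc_hpd_regs, le_shift and le_mask are illustrative names, not taken from this patch):

	static struct link_encoder *dcn30_link_encoder_create(
			const struct encoder_init_data *enc_init_data)
	{
		struct dcn20_link_encoder *enc20 = kzalloc(sizeof(*enc20), GFP_KERNEL);

		if (!enc20)
			return NULL;

		dcn30_link_encoder_construct(enc20,
				enc_init_data,
				&link_enc_feature,
				&link_enc_regs[enc_init_data->transmitter],
				&link_enc_aux_regs,
				&link_enc_hpd_regs,
				&le_shift,
				&le_mask);

		return &enc20->enc10.base;
	}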
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h
new file mode 100644
index 000000000000..585d1ce63db1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_ENCODER__DCN30_H__
+#define __DC_LINK_ENCODER__DCN30_H__
+
+#include "dcn20/dcn20_link_encoder.h"
+
+#define LE_DCN3_REG_LIST(id)\
+ SRI(DIG_BE_CNTL, DIG, id), \
+ SRI(DIG_BE_EN_CNTL, DIG, id), \
+ SRI(TMDS_CTL_BITS, DIG, id), \
+ SRI(TMDS_DCBALANCER_CONTROL, DIG, id), \
+ SRI(DP_CONFIG, DP, id), \
+ SRI(DP_DPHY_CNTL, DP, id), \
+ SRI(DP_DPHY_PRBS_CNTL, DP, id), \
+ SRI(DP_DPHY_SCRAM_CNTL, DP, id),\
+ SRI(DP_DPHY_SYM0, DP, id), \
+ SRI(DP_DPHY_SYM1, DP, id), \
+ SRI(DP_DPHY_SYM2, DP, id), \
+ SRI(DP_DPHY_TRAINING_PATTERN_SEL, DP, id), \
+ SRI(DP_LINK_CNTL, DP, id), \
+ SRI(DP_LINK_FRAMING_CNTL, DP, id), \
+ SRI(DP_MSE_SAT0, DP, id), \
+ SRI(DP_MSE_SAT1, DP, id), \
+ SRI(DP_MSE_SAT2, DP, id), \
+ SRI(DP_MSE_SAT_UPDATE, DP, id), \
+ SRI(DP_SEC_CNTL, DP, id), \
+ SRI(DP_VID_STREAM_CNTL, DP, id), \
+ SRI(DP_DPHY_FAST_TRAINING, DP, id), \
+ SRI(DP_SEC_CNTL1, DP, id), \
+ SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
+ SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id)
+
+#define LINK_ENCODER_MASK_SH_LIST_DCN30(mask_sh) \
+ LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh)
+
+#define DPCS_DCN3_MASK_SH_LIST(mask_sh)\
+ DPCS_DCN2_MASK_SH_LIST(mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT_18_BIT, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_TX_VBOOST_LVL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_TX_CLK_EN, mask_sh)
+
+void dcn30_link_encoder_construct(
+ struct dcn20_link_encoder *enc20,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dcn10_link_enc_registers *link_regs,
+ const struct dcn10_link_enc_aux_registers *aux_regs,
+ const struct dcn10_link_enc_hpd_registers *hpd_regs,
+ const struct dcn10_link_enc_shift *link_shift,
+ const struct dcn10_link_enc_mask *link_mask);
+
+#endif /* __DC_LINK_ENCODER__DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
new file mode 100644
index 000000000000..f5e80a0db72b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
@@ -0,0 +1,851 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dc_bios_types.h"
+#include "dcn30_dio_stream_encoder.h"
+#include "reg_helper.h"
+#include "hw_shared.h"
+#include "core_types.h"
+#include <linux/delay.h>
+
+
+#define DC_LOGGER \
+ enc1->base.ctx->logger
+
+
+#define REG(reg)\
+ (enc1->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ enc1->se_shift->field_name, enc1->se_mask->field_name
+
+#define VBI_LINE_0 0
+#define HDMI_CLOCK_CHANNEL_RATE_MORE_340M 340000
+
+#define CTX \
+ enc1->base.ctx
+
+
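+/* Copy the four header bytes and the first 32 payload bytes of a dc_info_packet
+ * into the dc_info_packet_128 layout.
+ */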
+void convert_dc_info_packet_to_128(
+ const struct dc_info_packet *info_packet,
+ struct dc_info_packet_128 *info_packet_128)
+{
+ unsigned int i;
+
+ info_packet_128->hb0 = info_packet->hb0;
+ info_packet_128->hb1 = info_packet->hb1;
+ info_packet_128->hb2 = info_packet->hb2;
+ info_packet_128->hb3 = info_packet->hb3;
+
+ for (i = 0; i < 32; i++)
+ info_packet_128->sb[i] = info_packet->sb[i];
+}
+
+static void enc3_update_hdmi_info_packet(
+ struct dcn10_stream_encoder *enc1,
+ uint32_t packet_index,
+ const struct dc_info_packet *info_packet)
+{
+ uint32_t cont, send, line;
+
+ if (info_packet->valid) {
+ enc1->base.vpg->funcs->update_generic_info_packet(
+ enc1->base.vpg,
+ packet_index,
+ info_packet);
+
+ /* enable transmission of packet(s) -
+ * packet transmission begins on the next frame */
+ cont = 1;
+ /* send packet(s) every frame */
+ send = 1;
+ /* select line number to send packets on */
+ line = 2;
+ } else {
+ cont = 0;
+ send = 0;
+ line = 0;
+ }
+
+ /* DP_SEC_GSP[x]_LINE_REFERENCE - keep default value REFER_TO_DP_SOF */
+
+ /* choose which generic packet control to use */
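+ /* Generic packets 0-7 use HDMI_GENERIC_PACKET_CONTROL0 for the cont/send bits
+ * and CONTROL1-4 for the line number; packets 8-14 use CONTROL6 and CONTROL7-10.
+ */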
+ switch (packet_index) {
+ case 0:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC0_CONT, cont,
+ HDMI_GENERIC0_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL1,
+ HDMI_GENERIC0_LINE, line);
+ break;
+ case 1:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC1_CONT, cont,
+ HDMI_GENERIC1_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL1,
+ HDMI_GENERIC1_LINE, line);
+ break;
+ case 2:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC2_CONT, cont,
+ HDMI_GENERIC2_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL2,
+ HDMI_GENERIC2_LINE, line);
+ break;
+ case 3:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC3_CONT, cont,
+ HDMI_GENERIC3_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL2,
+ HDMI_GENERIC3_LINE, line);
+ break;
+ case 4:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC4_CONT, cont,
+ HDMI_GENERIC4_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL3,
+ HDMI_GENERIC4_LINE, line);
+ break;
+ case 5:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC5_CONT, cont,
+ HDMI_GENERIC5_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL3,
+ HDMI_GENERIC5_LINE, line);
+ break;
+ case 6:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC6_CONT, cont,
+ HDMI_GENERIC6_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL4,
+ HDMI_GENERIC6_LINE, line);
+ break;
+ case 7:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC7_CONT, cont,
+ HDMI_GENERIC7_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL4,
+ HDMI_GENERIC7_LINE, line);
+ break;
+ case 8:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL6,
+ HDMI_GENERIC8_CONT, cont,
+ HDMI_GENERIC8_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL7,
+ HDMI_GENERIC8_LINE, line);
+ break;
+ case 9:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL6,
+ HDMI_GENERIC9_CONT, cont,
+ HDMI_GENERIC9_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL7,
+ HDMI_GENERIC9_LINE, line);
+ break;
+ case 10:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL6,
+ HDMI_GENERIC10_CONT, cont,
+ HDMI_GENERIC10_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL8,
+ HDMI_GENERIC10_LINE, line);
+ break;
+ case 11:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL6,
+ HDMI_GENERIC11_CONT, cont,
+ HDMI_GENERIC11_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL8,
+ HDMI_GENERIC11_LINE, line);
+ break;
+ case 12:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL6,
+ HDMI_GENERIC12_CONT, cont,
+ HDMI_GENERIC12_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL9,
+ HDMI_GENERIC12_LINE, line);
+ break;
+ case 13:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL6,
+ HDMI_GENERIC13_CONT, cont,
+ HDMI_GENERIC13_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL9,
+ HDMI_GENERIC13_LINE, line);
+ break;
+ case 14:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL6,
+ HDMI_GENERIC14_CONT, cont,
+ HDMI_GENERIC14_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL10,
+ HDMI_GENERIC14_LINE, line);
+ break;
+ default:
+ /* invalid HW packet index */
+ DC_LOG_WARNING(
+ "Invalid HW packet index: %s()\n",
+ __func__);
+ return;
+ }
+}
+
+static void enc3_stream_encoder_update_hdmi_info_packets(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ /* For bring-up, disable HDMI double buffering (TODO) */
+ REG_UPDATE(HDMI_DB_CONTROL, HDMI_DB_DISABLE, 1);
+ REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
+
+ /*Always add mandatory packets first followed by optional ones*/
+ enc3_update_hdmi_info_packet(enc1, 0, &info_frame->avi);
+ enc3_update_hdmi_info_packet(enc1, 5, &info_frame->hfvsif);
+ enc3_update_hdmi_info_packet(enc1, 2, &info_frame->gamut);
+ enc3_update_hdmi_info_packet(enc1, 1, &info_frame->vendor);
+ enc3_update_hdmi_info_packet(enc1, 3, &info_frame->spd);
+ enc3_update_hdmi_info_packet(enc1, 4, &info_frame->hdrsmd);
+}
+
+static void enc3_stream_encoder_stop_hdmi_info_packets(
+ struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ /* stop generic packets 0,1 on HDMI */
+ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0,
+ HDMI_GENERIC0_CONT, 0,
+ HDMI_GENERIC0_SEND, 0,
+ HDMI_GENERIC1_CONT, 0,
+ HDMI_GENERIC1_SEND, 0);
+ REG_SET_2(HDMI_GENERIC_PACKET_CONTROL1, 0,
+ HDMI_GENERIC0_LINE, 0,
+ HDMI_GENERIC1_LINE, 0);
+
+ /* stop generic packets 2,3 on HDMI */
+ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0,
+ HDMI_GENERIC2_CONT, 0,
+ HDMI_GENERIC2_SEND, 0,
+ HDMI_GENERIC3_CONT, 0,
+ HDMI_GENERIC3_SEND, 0);
+ REG_SET_2(HDMI_GENERIC_PACKET_CONTROL2, 0,
+ HDMI_GENERIC2_LINE, 0,
+ HDMI_GENERIC3_LINE, 0);
+
+ /* stop generic packets 4,5 on HDMI */
+ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0,
+ HDMI_GENERIC4_CONT, 0,
+ HDMI_GENERIC4_SEND, 0,
+ HDMI_GENERIC5_CONT, 0,
+ HDMI_GENERIC5_SEND, 0);
+ REG_SET_2(HDMI_GENERIC_PACKET_CONTROL3, 0,
+ HDMI_GENERIC4_LINE, 0,
+ HDMI_GENERIC5_LINE, 0);
+
+ /* stop generic packets 6,7 on HDMI */
+ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0,
+ HDMI_GENERIC6_CONT, 0,
+ HDMI_GENERIC6_SEND, 0,
+ HDMI_GENERIC7_CONT, 0,
+ HDMI_GENERIC7_SEND, 0);
+ REG_SET_2(HDMI_GENERIC_PACKET_CONTROL4, 0,
+ HDMI_GENERIC6_LINE, 0,
+ HDMI_GENERIC7_LINE, 0);
+
+ /* stop generic packets 8,9 on HDMI */
+ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL6, 0,
+ HDMI_GENERIC8_CONT, 0,
+ HDMI_GENERIC8_SEND, 0,
+ HDMI_GENERIC9_CONT, 0,
+ HDMI_GENERIC9_SEND, 0);
+ REG_SET_2(HDMI_GENERIC_PACKET_CONTROL7, 0,
+ HDMI_GENERIC8_LINE, 0,
+ HDMI_GENERIC9_LINE, 0);
+
+ /* stop generic packets 10,11 on HDMI */
+ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL6, 0,
+ HDMI_GENERIC10_CONT, 0,
+ HDMI_GENERIC10_SEND, 0,
+ HDMI_GENERIC11_CONT, 0,
+ HDMI_GENERIC11_SEND, 0);
+ REG_SET_2(HDMI_GENERIC_PACKET_CONTROL8, 0,
+ HDMI_GENERIC10_LINE, 0,
+ HDMI_GENERIC11_LINE, 0);
+
+ /* stop generic packets 12,13 on HDMI */
+ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL6, 0,
+ HDMI_GENERIC12_CONT, 0,
+ HDMI_GENERIC12_SEND, 0,
+ HDMI_GENERIC13_CONT, 0,
+ HDMI_GENERIC13_SEND, 0);
+ REG_SET_2(HDMI_GENERIC_PACKET_CONTROL9, 0,
+ HDMI_GENERIC12_LINE, 0,
+ HDMI_GENERIC13_LINE, 0);
+
+ /* stop generic packet 14 on HDMI */
+ REG_SET_2(HDMI_GENERIC_PACKET_CONTROL6, 0,
+ HDMI_GENERIC14_CONT, 0,
+ HDMI_GENERIC14_SEND, 0);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL10,
+ HDMI_GENERIC14_LINE, 0);
+}
+
+/* Set DSC-related configuration.
+ * dsc_mode: 0 disables DSC, other values enable DSC in specified format
+ * dsc_bytes_per_pixel: Bytes per pixel in u3.28 format
+ * dsc_slice_width: Slice width in pixels
+ */
+static void enc3_dp_set_dsc_config(struct stream_encoder *enc,
+ enum optc_dsc_mode dsc_mode,
+ uint32_t dsc_bytes_per_pixel,
+ uint32_t dsc_slice_width)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ REG_UPDATE_2(DP_DSC_CNTL,
+ DP_DSC_MODE, dsc_mode,
+ DP_DSC_SLICE_WIDTH, dsc_slice_width);
+
+ REG_SET(DP_DSC_BYTES_PER_PIXEL, 0,
+ DP_DSC_BYTES_PER_PIXEL, dsc_bytes_per_pixel);
+}
+
+
+static void enc3_dp_set_dsc_pps_info_packet(struct stream_encoder *enc,
+ bool enable,
+ uint8_t *dsc_packed_pps)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ if (enable) {
+ struct dc_info_packet pps_sdp;
+ int i;
+
+ /* Configure for PPS packet size (128 bytes) */
+ REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP11_PPS, 1);
+
+ /* We need to turn on the clock before programming the AFMT block
+ *
+ * TODO: We may not need this here anymore since update_generic_info_packet
+ * no longer touches AFMT
+ */
+ REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
+
+ /* Load PPS into infoframe (SDP) registers */
+ pps_sdp.valid = true;
+ pps_sdp.hb0 = 0;
+ pps_sdp.hb1 = DC_DP_INFOFRAME_TYPE_PPS;
+ pps_sdp.hb2 = 127;
+ pps_sdp.hb3 = 0;
+
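+ /* The 128-byte packed PPS is split into four 32-byte SDP payloads, written
+ * to generic info packets 11 through 14.
+ */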
+ for (i = 0; i < 4; i++) {
+ memcpy(pps_sdp.sb, &dsc_packed_pps[i * 32], 32);
+ enc1->base.vpg->funcs->update_generic_info_packet(
+ enc1->base.vpg,
+ 11 + i,
+ &pps_sdp);
+ }
+
+ /* SW should make sure VBID[6] update line number is bigger
+ * than PPS transmit line number
+ */
+ REG_UPDATE(DP_GSP11_CNTL,
+ DP_SEC_GSP11_LINE_NUM, 2);
+ REG_UPDATE_2(DP_MSA_VBID_MISC,
+ DP_VBID6_LINE_REFERENCE, 0,
+ DP_VBID6_LINE_NUM, 3);
+
+ /* Send PPS data at the line number specified above.
+ * The DP spec requires PPS to be sent only when it changes; however, since
+ * the decoder has to be able to handle a change on every frame, we send it
+ * always (i.e. on every frame) to reduce the chance of it being missed by
+ * the decoder. If sending PPS only when it changes turns out to be required,
+ * we can use the DP_SEC_GSP11_SEND register.
+ */
+ REG_UPDATE(DP_GSP11_CNTL,
+ DP_SEC_GSP11_ENABLE, 1);
+ REG_UPDATE(DP_SEC_CNTL,
+ DP_SEC_STREAM_ENABLE, 1);
+ } else {
+ /* Disable Generic Stream Packet 11 (GSP) transmission */
+ REG_UPDATE(DP_GSP11_CNTL, DP_SEC_GSP11_ENABLE, 0);
+ REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP11_PPS, 0);
+ }
+}
+
+
+/* This function reads DSC-related register fields to be logged later in
+ * dcn10_log_hw_state into a dcn_dsc_state struct.
+ */
+static void enc3_read_state(struct stream_encoder *enc, struct enc_state *s)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ //if dsc is enabled, continue to read
+ REG_GET(DP_DSC_CNTL, DP_DSC_MODE, &s->dsc_mode);
+ if (s->dsc_mode) {
+ REG_GET(DP_DSC_CNTL, DP_DSC_SLICE_WIDTH, &s->dsc_slice_width);
+ REG_GET(DP_GSP11_CNTL, DP_SEC_GSP11_LINE_NUM, &s->sec_gsp_pps_line_num);
+
+ REG_GET(DP_MSA_VBID_MISC, DP_VBID6_LINE_REFERENCE, &s->vbid6_line_reference);
+ REG_GET(DP_MSA_VBID_MISC, DP_VBID6_LINE_NUM, &s->vbid6_line_num);
+
+ REG_GET(DP_GSP11_CNTL, DP_SEC_GSP11_ENABLE, &s->sec_gsp_pps_enable);
+ REG_GET(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, &s->sec_stream_enable);
+ }
+}
+
+static void enc3_stream_encoder_update_dp_info_packets(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ uint32_t value = 0;
+ uint32_t dmdata_packet_enabled = 0;
+
+ if (info_frame->vsc.valid) {
+ enc->vpg->funcs->update_generic_info_packet(
+ enc->vpg,
+ 0, /* packetIndex */
+ &info_frame->vsc);
+ }
+ if (info_frame->spd.valid) {
+ enc->vpg->funcs->update_generic_info_packet(
+ enc->vpg,
+ 2, /* packetIndex */
+ &info_frame->spd);
+ }
+ if (info_frame->hdrsmd.valid) {
+ enc->vpg->funcs->update_generic_info_packet(
+ enc->vpg,
+ 3, /* packetIndex */
+ &info_frame->hdrsmd);
+ }
+ /* packetIndex 4 is reserved for sending immediate SDP messages; use other
+ * packet indices (such as 5 or 6) for other info packets
+ */
+
+ /* enable/disable transmission of packet(s).
+ * If enabled, packet transmission begins on the next frame
+ */
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid);
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid);
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid);
+
+
+ /* This bit is the master enable bit.
+ * When enabling the secondary stream engine,
+ * this master bit must also be set.
+ * This register is shared with the audio info frame.
+ * Therefore we need to enable the master bit
+ * if at least one of the fields is not 0.
+ */
+ value = REG_READ(DP_SEC_CNTL);
+ if (value)
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+
+ /* check if dynamic metadata packet transmission is enabled */
+ REG_GET(DP_SEC_METADATA_TRANSMISSION,
+ DP_SEC_METADATA_PACKET_ENABLE, &dmdata_packet_enabled);
+
+ if (dmdata_packet_enabled)
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+}
+
+static void enc3_dp_set_odm_combine(
+ struct stream_encoder *enc,
+ bool odm_combine)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_COMBINE, odm_combine);
+}
+
+/* setup stream encoder in dvi mode */
+void enc3_stream_encoder_dvi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ bool is_dual_link)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ if (!enc->ctx->dc->debug.avoid_vbios_exec_table) {
+ struct bp_encoder_control cntl = {0};
+
+ cntl.action = ENCODER_CONTROL_SETUP;
+ cntl.engine_id = enc1->base.id;
+ cntl.signal = is_dual_link ?
+ SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK;
+ cntl.enable_dp_audio = false;
+ cntl.pixel_clock = crtc_timing->pix_clk_100hz / 10;
+ cntl.lanes_number = (is_dual_link) ? LANE_COUNT_EIGHT : LANE_COUNT_FOUR;
+
+ if (enc1->base.bp->funcs->encoder_control(
+ enc1->base.bp, &cntl) != BP_RESULT_OK)
+ return;
+
+ } else {
+
+ //Set pattern for clock channel, default value 0x63 does not work
+ REG_UPDATE(DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, 0x1F);
+
+ //DIG_BE_TMDS_DVI_MODE : TMDS-DVI mode is already set in link_encoder_setup
+
+ //DIG_SOURCE_SELECT is already set in dig_connect_to_otg
+
+ /* set DIG_START to 0x1 to reset FIFO */
+ REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
+ udelay(1);
+
+ /* write 0 to take the FIFO out of reset */
+ REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);
+ udelay(1);
+ }
+
+ ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB);
+ ASSERT(crtc_timing->display_color_depth == COLOR_DEPTH_888);
+ enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing);
+}
+
+/* setup stream encoder in hdmi mode */
+static void enc3_stream_encoder_hdmi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ int actual_pix_clk_khz,
+ bool enable_audio)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ if (!enc->ctx->dc->debug.avoid_vbios_exec_table) {
+ struct bp_encoder_control cntl = {0};
+
+ cntl.action = ENCODER_CONTROL_SETUP;
+ cntl.engine_id = enc1->base.id;
+ cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ cntl.enable_dp_audio = enable_audio;
+ cntl.pixel_clock = actual_pix_clk_khz;
+ cntl.lanes_number = LANE_COUNT_FOUR;
+
+ if (enc1->base.bp->funcs->encoder_control(
+ enc1->base.bp, &cntl) != BP_RESULT_OK)
+ return;
+
+ } else {
+
+ //Set pattern for clock channel, default value 0x63 does not work
+ REG_UPDATE(DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, 0x1F);
+
+ //DIG_BE_TMDS_HDMI_MODE : TMDS-HDMI mode is already set in link_encoder_setup
+
+ //DIG_SOURCE_SELECT is already set in dig_connect_to_otg
+
+ /* set DIG_START to 0x1 to reset FIFO */
+ REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
+ udelay(1);
+
+ /* write 0 to take the FIFO out of reset */
+ REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);
+ udelay(1);
+ }
+
+ /* Configure pixel encoding */
+ enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing);
+
+ /* setup HDMI engine */
+ REG_UPDATE_6(HDMI_CONTROL,
+ HDMI_PACKET_GEN_VERSION, 1,
+ HDMI_KEEPOUT_MODE, 1,
+ HDMI_DEEP_COLOR_ENABLE, 0,
+ HDMI_DATA_SCRAMBLE_EN, 0,
+ HDMI_NO_EXTRA_NULL_PACKET_FILLED, 1,
+ HDMI_CLOCK_CHANNEL_RATE, 0);
+
+ /* Configure color depth */
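+ /* HDMI_DEEP_COLOR_DEPTH encoding: 0 = 24bpp, 1 = 30bpp, 2 = 36bpp, 3 = 48bpp */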
+ switch (crtc_timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ REG_UPDATE(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
+ break;
+ case COLOR_DEPTH_101010:
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 1,
+ HDMI_DEEP_COLOR_ENABLE, 0);
+ } else {
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 1,
+ HDMI_DEEP_COLOR_ENABLE, 1);
+ }
+ break;
+ case COLOR_DEPTH_121212:
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 2,
+ HDMI_DEEP_COLOR_ENABLE, 0);
+ } else {
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 2,
+ HDMI_DEEP_COLOR_ENABLE, 1);
+ }
+ break;
+ case COLOR_DEPTH_161616:
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 3,
+ HDMI_DEEP_COLOR_ENABLE, 1);
+ break;
+ default:
+ break;
+ }
+
+ if (actual_pix_clk_khz >= HDMI_CLOCK_CHANNEL_RATE_MORE_340M) {
+ /* enable HDMI data scrambler
+ * HDMI_CLOCK_CHANNEL_RATE_MORE_340M
+ * Clock channel frequency is 1/4 of character rate.
+ */
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DATA_SCRAMBLE_EN, 1,
+ HDMI_CLOCK_CHANNEL_RATE, 1);
+ } else if (crtc_timing->flags.LTE_340MCSC_SCRAMBLE) {
+
+ /* TODO: New feature for DCE11, still need to implement */
+
+ /* enable HDMI data scrambler
+ * HDMI_CLOCK_CHANNEL_FREQ_EQUAL_TO_CHAR_RATE
+ * Clock channel frequency is the same
+ * as character rate
+ */
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DATA_SCRAMBLE_EN, 1,
+ HDMI_CLOCK_CHANNEL_RATE, 0);
+ }
+
+
+ /* Enable transmission of General Control packet on every frame */
+ REG_UPDATE_3(HDMI_VBI_PACKET_CONTROL,
+ HDMI_GC_CONT, 1,
+ HDMI_GC_SEND, 1,
+ HDMI_NULL_SEND, 1);
+
+ /* following belongs to audio */
+ /* Enable Audio InfoFrame packet transmission. */
+ REG_UPDATE(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
+
+ /* update double-buffered AUDIO_INFO registers immediately */
+ ASSERT(enc->afmt);
+ enc->afmt->funcs->audio_info_immediate_update(enc->afmt);
+
+ /* Select line number on which to send Audio InfoFrame packets */
+ REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE,
+ VBI_LINE_0 + 2);
+
+ /* set HDMI GC AVMUTE */
+ REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, 0);
+}
+
+static void enc3_audio_mute_control(
+ struct stream_encoder *enc,
+ bool mute)
+{
+ ASSERT(enc->afmt);
+ enc->afmt->funcs->audio_mute_control(enc->afmt, mute);
+}
+
+static void enc3_se_dp_audio_setup(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info)
+{
+ ASSERT(enc->afmt);
+ enc->afmt->funcs->se_audio_setup(enc->afmt, az_inst, info);
+}
+
+#define DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT 0x8000
+#define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC 1
+
+static void enc3_se_setup_dp_audio(
+ struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ /* --- DP Audio packet configurations --- */
+
+ /* ATP Configuration */
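+ /* DP_SEC_AUD_N default of 0x8000 corresponds to an audio N value of 32768 */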
+ REG_SET(DP_SEC_AUD_N, 0,
+ DP_SEC_AUD_N, DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT);
+
+ /* Async/auto-calc timestamp mode */
+ REG_SET(DP_SEC_TIMESTAMP, 0, DP_SEC_TIMESTAMP_MODE,
+ DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC);
+
+ ASSERT(enc->afmt);
+ enc->afmt->funcs->setup_dp_audio(enc->afmt);
+}
+
+static void enc3_se_dp_audio_enable(
+ struct stream_encoder *enc)
+{
+ enc1_se_enable_audio_clock(enc, true);
+ enc3_se_setup_dp_audio(enc);
+ enc1_se_enable_dp_audio(enc);
+}
+
+static void enc3_se_setup_hdmi_audio(
+ struct stream_encoder *enc,
+ const struct audio_crtc_info *crtc_info)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ struct audio_clock_info audio_clock_info = {0};
+
+ /* Setup audio in AFMT - program AFMT block associated with DIO */
+ ASSERT(enc->afmt);
+ enc->afmt->funcs->setup_hdmi_audio(enc->afmt);
+
+ /* HDMI_AUDIO_PACKET_CONTROL */
+ REG_UPDATE(HDMI_AUDIO_PACKET_CONTROL,
+ HDMI_AUDIO_DELAY_EN, 1);
+
+ /* HDMI_ACR_PACKET_CONTROL */
+ REG_UPDATE_3(HDMI_ACR_PACKET_CONTROL,
+ HDMI_ACR_AUTO_SEND, 1,
+ HDMI_ACR_SOURCE, 0,
+ HDMI_ACR_AUDIO_PRIORITY, 0);
+
+ /* Program audio clock sample/regeneration parameters */
+ get_audio_clock_info(crtc_info->color_depth,
+ crtc_info->requested_pixel_clock_100Hz,
+ crtc_info->calculated_pixel_clock_100Hz,
+ &audio_clock_info);
+ DC_LOG_HW_AUDIO(
+ "\n%s:Input::requested_pixel_clock_100Hz = %d" \
+ "calculated_pixel_clock_100Hz = %d \n", __func__, \
+ crtc_info->requested_pixel_clock_100Hz, \
+ crtc_info->calculated_pixel_clock_100Hz);
+
+ /* HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK */
+ REG_UPDATE(HDMI_ACR_32_0, HDMI_ACR_CTS_32, audio_clock_info.cts_32khz);
+
+ /* HDMI_ACR_32_1__HDMI_ACR_N_32_MASK */
+ REG_UPDATE(HDMI_ACR_32_1, HDMI_ACR_N_32, audio_clock_info.n_32khz);
+
+ /* HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK */
+ REG_UPDATE(HDMI_ACR_44_0, HDMI_ACR_CTS_44, audio_clock_info.cts_44khz);
+
+ /* HDMI_ACR_44_1__HDMI_ACR_N_44_MASK */
+ REG_UPDATE(HDMI_ACR_44_1, HDMI_ACR_N_44, audio_clock_info.n_44khz);
+
+ /* HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK */
+ REG_UPDATE(HDMI_ACR_48_0, HDMI_ACR_CTS_48, audio_clock_info.cts_48khz);
+
+ /* HDMI_ACR_48_1__HDMI_ACR_N_48_MASK */
+ REG_UPDATE(HDMI_ACR_48_1, HDMI_ACR_N_48, audio_clock_info.n_48khz);
+
+ /* The video driver cannot know in advance which sample rate will
+ * be used by the HD Audio driver.
+ * The HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE field is
+ * programmed later in the interrupt callback.
+ */
+}
+
+static void enc3_se_hdmi_audio_setup(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info,
+ struct audio_crtc_info *audio_crtc_info)
+{
+ enc1_se_enable_audio_clock(enc, true);
+ enc3_se_setup_hdmi_audio(enc, audio_crtc_info);
+ ASSERT(enc->afmt);
+ enc->afmt->funcs->se_audio_setup(enc->afmt, az_inst, info);
+}
+
+
+static const struct stream_encoder_funcs dcn30_str_enc_funcs = {
+ .dp_set_odm_combine =
+ enc3_dp_set_odm_combine,
+ .dp_set_stream_attribute =
+ enc2_stream_encoder_dp_set_stream_attribute,
+ .hdmi_set_stream_attribute =
+ enc3_stream_encoder_hdmi_set_stream_attribute,
+ .dvi_set_stream_attribute =
+ enc3_stream_encoder_dvi_set_stream_attribute,
+ .set_mst_bandwidth =
+ enc1_stream_encoder_set_mst_bandwidth,
+ .update_hdmi_info_packets =
+ enc3_stream_encoder_update_hdmi_info_packets,
+ .stop_hdmi_info_packets =
+ enc3_stream_encoder_stop_hdmi_info_packets,
+ .update_dp_info_packets =
+ enc3_stream_encoder_update_dp_info_packets,
+ .stop_dp_info_packets =
+ enc1_stream_encoder_stop_dp_info_packets,
+ .dp_blank =
+ enc1_stream_encoder_dp_blank,
+ .dp_unblank =
+ enc2_stream_encoder_dp_unblank,
+ .audio_mute_control = enc3_audio_mute_control,
+
+ .dp_audio_setup = enc3_se_dp_audio_setup,
+ .dp_audio_enable = enc3_se_dp_audio_enable,
+ .dp_audio_disable = enc1_se_dp_audio_disable,
+
+ .hdmi_audio_setup = enc3_se_hdmi_audio_setup,
+ .hdmi_audio_disable = enc1_se_hdmi_audio_disable,
+ .setup_stereo_sync = enc1_setup_stereo_sync,
+ .set_avmute = enc1_stream_encoder_set_avmute,
+ .dig_connect_to_otg = enc1_dig_connect_to_otg,
+ .dig_source_otg = enc1_dig_source_otg,
+
+ .dp_get_pixel_format = enc1_stream_encoder_dp_get_pixel_format,
+
+ .enc_read_state = enc3_read_state,
+ .dp_set_dsc_config = enc3_dp_set_dsc_config,
+ .dp_set_dsc_pps_info_packet = enc3_dp_set_dsc_pps_info_packet,
+ .set_dynamic_metadata = enc2_set_dynamic_metadata,
+ .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,
+};
+
+void dcn30_dio_stream_encoder_construct(
+ struct dcn10_stream_encoder *enc1,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ enum engine_id eng_id,
+ struct vpg *vpg,
+ struct afmt *afmt,
+ const struct dcn10_stream_enc_registers *regs,
+ const struct dcn10_stream_encoder_shift *se_shift,
+ const struct dcn10_stream_encoder_mask *se_mask)
+{
+ enc1->base.funcs = &dcn30_str_enc_funcs;
+ enc1->base.ctx = ctx;
+ enc1->base.id = eng_id;
+ enc1->base.bp = bp;
+ enc1->base.vpg = vpg;
+ enc1->base.afmt = afmt;
+ enc1->regs = regs;
+ enc1->se_shift = se_shift;
+ enc1->se_mask = se_mask;
+ enc1->base.stream_enc_inst = vpg->inst;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h
new file mode 100644
index 000000000000..8db6d76a1131
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DIO_STREAM_ENCODER_DCN30_H__
+#define __DC_DIO_STREAM_ENCODER_DCN30_H__
+
+#include "dcn30/dcn30_vpg.h"
+#include "dcn30/dcn30_afmt.h"
+#include "stream_encoder.h"
+#include "dcn20/dcn20_stream_encoder.h"
+
+/* Register bit field name change */
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+
+
+#define SE_DCN3_REG_LIST(id)\
+ SRI(AFMT_CNTL, DIG, id), \
+ SRI(DIG_FE_CNTL, DIG, id), \
+ SRI(HDMI_CONTROL, DIG, id), \
+ SRI(HDMI_DB_CONTROL, DIG, id), \
+ SRI(HDMI_GC, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL4, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL5, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL6, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL7, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL8, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL9, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL10, DIG, id), \
+ SRI(HDMI_INFOFRAME_CONTROL0, DIG, id), \
+ SRI(HDMI_INFOFRAME_CONTROL1, DIG, id), \
+ SRI(HDMI_VBI_PACKET_CONTROL, DIG, id), \
+ SRI(HDMI_AUDIO_PACKET_CONTROL, DIG, id),\
+ SRI(HDMI_ACR_PACKET_CONTROL, DIG, id),\
+ SRI(HDMI_ACR_32_0, DIG, id),\
+ SRI(HDMI_ACR_32_1, DIG, id),\
+ SRI(HDMI_ACR_44_0, DIG, id),\
+ SRI(HDMI_ACR_44_1, DIG, id),\
+ SRI(HDMI_ACR_48_0, DIG, id),\
+ SRI(HDMI_ACR_48_1, DIG, id),\
+ SRI(DP_DB_CNTL, DP, id), \
+ SRI(DP_MSA_MISC, DP, id), \
+ SRI(DP_MSA_VBID_MISC, DP, id), \
+ SRI(DP_MSA_COLORIMETRY, DP, id), \
+ SRI(DP_MSA_TIMING_PARAM1, DP, id), \
+ SRI(DP_MSA_TIMING_PARAM2, DP, id), \
+ SRI(DP_MSA_TIMING_PARAM3, DP, id), \
+ SRI(DP_MSA_TIMING_PARAM4, DP, id), \
+ SRI(DP_MSE_RATE_CNTL, DP, id), \
+ SRI(DP_MSE_RATE_UPDATE, DP, id), \
+ SRI(DP_PIXEL_FORMAT, DP, id), \
+ SRI(DP_SEC_CNTL, DP, id), \
+ SRI(DP_SEC_CNTL2, DP, id), \
+ SRI(DP_SEC_CNTL6, DP, id), \
+ SRI(DP_STEER_FIFO, DP, id), \
+ SRI(DP_VID_M, DP, id), \
+ SRI(DP_VID_N, DP, id), \
+ SRI(DP_VID_STREAM_CNTL, DP, id), \
+ SRI(DP_VID_TIMING, DP, id), \
+ SRI(DP_SEC_AUD_N, DP, id), \
+ SRI(DP_SEC_TIMESTAMP, DP, id), \
+ SRI(DP_DSC_CNTL, DP, id), \
+ SRI(DP_DSC_BYTES_PER_PIXEL, DP, id), \
+ SRI(DP_SEC_METADATA_TRANSMISSION, DP, id), \
+ SRI(HDMI_METADATA_PACKET_CONTROL, DIG, id), \
+ SRI(DP_SEC_FRAMING4, DP, id), \
+ SRI(DP_GSP11_CNTL, DP, id), \
+ SRI(DME_CONTROL, DME, id),\
+ SRI(DIG_CLOCK_PATTERN, DIG, id)
+
+
+#define SE_COMMON_MASK_SH_LIST_DCN30_BASE(mask_sh)\
+ SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\
+ SE_SF(DP0_DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_PACKET_GEN_VERSION, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_KEEPOUT_MODE, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_NO_EXTRA_NULL_PACKET_FILLED, mask_sh),\
+ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GC, HDMI_GC_AVMUTE, mask_sh),\
+ SE_SF(DP0_DP_MSE_RATE_CNTL, DP_MSE_RATE_X, mask_sh),\
+ SE_SF(DP0_DP_MSE_RATE_CNTL, DP_MSE_RATE_Y, mask_sh),\
+ SE_SF(DP0_DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP1_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_MPG_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP4_SEND, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP4_SEND_PENDING, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL4, DP_SEC_GSP4_LINE_NUM, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP4_SEND_ANY_LINE, mask_sh),\
+ SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, mask_sh),\
+ SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, mask_sh),\
+ SE_SF(DP0_DP_STEER_FIFO, DP_STEER_FIFO_RESET, mask_sh),\
+ SE_SF(DP0_DP_VID_TIMING, DP_VID_M_N_GEN_EN, mask_sh),\
+ SE_SF(DP0_DP_VID_N, DP_VID_N, mask_sh),\
+ SE_SF(DP0_DP_VID_M, DP_VID_M, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, DIG_START, mask_sh),\
+ SE_SF(DIG0_HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUDIO_PRIORITY, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_32_0, HDMI_ACR_CTS_32, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_32_1, HDMI_ACR_N_32, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_44_0, HDMI_ACR_CTS_44, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_44_1, HDMI_ACR_N_44, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_48_0, HDMI_ACR_CTS_48, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_48_1, HDMI_ACR_N_48, mask_sh),\
+ SE_SF(DP0_DP_SEC_AUD_N, DP_SEC_AUD_N, mask_sh),\
+ SE_SF(DP0_DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ASP_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ATP_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_AIP_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ACM_ENABLE, mask_sh),\
+ SE_SF(DIG0_AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_CLOCK_CHANNEL_RATE, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP4_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP6_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP7_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP7_SEND, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL6, DP_SEC_GSP7_LINE_NUM, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP11_PPS, mask_sh),\
+ SE_SF(DP0_DP_GSP11_CNTL, DP_SEC_GSP11_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_GSP11_CNTL, DP_SEC_GSP11_LINE_NUM, mask_sh),\
+ SE_SF(DP0_DP_DB_CNTL, DP_DB_DISABLE, mask_sh),\
+ SE_SF(DP0_DP_MSA_COLORIMETRY, DP_MSA_MISC0, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_HTOTAL, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_VTOTAL, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM2, DP_MSA_HSTART, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM2, DP_MSA_VSTART, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_HSYNCWIDTH, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_HSYNCPOLARITY, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_VSYNCWIDTH, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_VSYNCPOLARITY, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_HWIDTH, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_VHEIGHT, mask_sh),\
+ SE_SF(DIG0_HDMI_DB_CONTROL, HDMI_DB_DISABLE, mask_sh),\
+ SE_SF(DP0_DP_VID_TIMING, DP_VID_N_MUL, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, DIG_SOURCE_SELECT, mask_sh), \
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC2_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC2_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC3_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC3_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC4_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC4_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC5_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC5_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC6_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC6_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC7_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC7_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC8_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC8_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC9_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC9_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC10_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC10_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC11_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC11_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC12_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC12_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC13_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC13_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC14_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC14_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL1, HDMI_GENERIC0_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL1, HDMI_GENERIC1_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL2, HDMI_GENERIC2_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL2, HDMI_GENERIC3_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL3, HDMI_GENERIC4_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL3, HDMI_GENERIC5_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL4, HDMI_GENERIC6_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL4, HDMI_GENERIC7_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL7, HDMI_GENERIC8_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL7, HDMI_GENERIC9_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL8, HDMI_GENERIC10_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL8, HDMI_GENERIC11_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL9, HDMI_GENERIC12_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL9, HDMI_GENERIC13_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL10, HDMI_GENERIC14_LINE, mask_sh),\
+ SE_SF(DP0_DP_DSC_CNTL, DP_DSC_MODE, mask_sh),\
+ SE_SF(DP0_DP_DSC_CNTL, DP_DSC_SLICE_WIDTH, mask_sh),\
+ SE_SF(DP0_DP_DSC_BYTES_PER_PIXEL, DP_DSC_BYTES_PER_PIXEL, mask_sh),\
+ SE_SF(DP0_DP_MSA_VBID_MISC, DP_VBID6_LINE_REFERENCE, mask_sh),\
+ SE_SF(DP0_DP_MSA_VBID_MISC, DP_VBID6_LINE_NUM, mask_sh),\
+ SE_SF(DME0_DME_CONTROL, METADATA_ENGINE_EN, mask_sh),\
+ SE_SF(DME0_DME_CONTROL, METADATA_HUBP_REQUESTOR_ID, mask_sh),\
+ SE_SF(DME0_DME_CONTROL, METADATA_STREAM_TYPE, mask_sh),\
+ SE_SF(DP0_DP_SEC_METADATA_TRANSMISSION, DP_SEC_METADATA_PACKET_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_METADATA_TRANSMISSION, DP_SEC_METADATA_PACKET_LINE_REFERENCE, mask_sh),\
+ SE_SF(DP0_DP_SEC_METADATA_TRANSMISSION, DP_SEC_METADATA_PACKET_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_METADATA_PACKET_CONTROL, HDMI_METADATA_PACKET_ENABLE, mask_sh),\
+ SE_SF(DIG0_HDMI_METADATA_PACKET_CONTROL, HDMI_METADATA_PACKET_LINE_REFERENCE, mask_sh),\
+ SE_SF(DIG0_HDMI_METADATA_PACKET_CONTROL, HDMI_METADATA_PACKET_LINE, mask_sh),\
+ SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_COMBINE, mask_sh),\
+ SE_SF(DP0_DP_SEC_FRAMING4, DP_SST_SDP_SPLITTING, mask_sh),\
+ SE_SF(DIG0_DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCN30(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_DCN30_BASE(mask_sh)
+
+void dcn30_dio_stream_encoder_construct(
+ struct dcn10_stream_encoder *enc1,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ enum engine_id eng_id,
+ struct vpg *vpg,
+ struct afmt *afmt,
+ const struct dcn10_stream_enc_registers *regs,
+ const struct dcn10_stream_encoder_shift *se_shift,
+ const struct dcn10_stream_encoder_mask *se_mask);
+
+#endif /* __DC_DIO_STREAM_ENCODER_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
new file mode 100644
index 000000000000..0eb881f2e0d6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -0,0 +1,1414 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "core_types.h"
+#include "reg_helper.h"
+#include "dcn30_dpp.h"
+#include "basics/conversion.h"
+#include "dcn30_cm_common.h"
+
+#define REG(reg)\
+ dpp->tf_regs->reg
+
+#define CTX \
+ dpp->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dpp->tf_shift->field_name, dpp->tf_mask->field_name
+
+
+void dpp30_read_state(struct dpp *dpp_base,
+ struct dcn_dpp_state *s)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ REG_GET(DPP_CONTROL,
+ DPP_CLOCK_ENABLE, &s->is_enabled);
+
+ // TODO: Implement for DCN3
+}
+
+/* Program the post-scaler CSC block in the DPP CM */
+void dpp3_program_post_csc(
+ struct dpp *dpp_base,
+ enum dc_color_space color_space,
+ enum dcn10_input_csc_select input_select,
+ const struct out_csc_color_matrix *tbl_entry)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ int i;
+ int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix);
+ const uint16_t *regval = NULL;
+ uint32_t cur_select = 0;
+ enum dcn10_input_csc_select select;
+ struct color_matrices_reg gam_regs;
+
+ if (input_select == INPUT_CSC_SELECT_BYPASS) {
+ REG_SET(CM_POST_CSC_CONTROL, 0, CM_POST_CSC_MODE, 0);
+ return;
+ }
+
+ if (tbl_entry == NULL) {
+ for (i = 0; i < arr_size; i++)
+ if (dpp_input_csc_matrix[i].color_space == color_space) {
+ regval = dpp_input_csc_matrix[i].regval;
+ break;
+ }
+
+ if (regval == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ } else {
+ regval = tbl_entry->regval;
+ }
+
+ /* Determine which CSC matrix (ICSC or COMA) is currently in use,
+ * then select the alternate set so the CSC update is double buffered
+ * and takes effect on a frame boundary.
+ */
+ REG_GET(CM_POST_CSC_CONTROL,
+ CM_POST_CSC_MODE_CURRENT, &cur_select);
+
+ if (cur_select != INPUT_CSC_SELECT_ICSC)
+ select = INPUT_CSC_SELECT_ICSC;
+ else
+ select = INPUT_CSC_SELECT_COMA;
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_POST_CSC_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_POST_CSC_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_POST_CSC_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_POST_CSC_C12;
+
+ if (select == INPUT_CSC_SELECT_ICSC) {
+
+ gam_regs.csc_c11_c12 = REG(CM_POST_CSC_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_POST_CSC_C33_C34);
+
+ } else {
+
+ gam_regs.csc_c11_c12 = REG(CM_POST_CSC_B_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_POST_CSC_B_C33_C34);
+
+ }
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ REG_SET(CM_POST_CSC_CONTROL, 0,
+ CM_POST_CSC_MODE, select);
+}
+
+
+/*CNVC degam unit has read only LUTs*/
+void dpp3_set_pre_degam(struct dpp *dpp_base, enum dc_transfer_func_predefined tr)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ int pre_degam_en = 1;
+ int degamma_lut_selection = 0;
+
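+ /* Fixed degamma LUT selection: 0 = sRGB, 1 = gamma 2.2, 2 = gamma 2.4,
+ * 3 = gamma 2.6, 4 = BT.709, 5 = PQ, 6 = HLG
+ */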
+ switch (tr) {
+ case TRANSFER_FUNCTION_LINEAR:
+ case TRANSFER_FUNCTION_UNITY:
+ pre_degam_en = 0; //bypass
+ break;
+ case TRANSFER_FUNCTION_SRGB:
+ degamma_lut_selection = 0;
+ break;
+ case TRANSFER_FUNCTION_BT709:
+ degamma_lut_selection = 4;
+ break;
+ case TRANSFER_FUNCTION_PQ:
+ degamma_lut_selection = 5;
+ break;
+ case TRANSFER_FUNCTION_HLG:
+ degamma_lut_selection = 6;
+ break;
+ case TRANSFER_FUNCTION_GAMMA22:
+ degamma_lut_selection = 1;
+ break;
+ case TRANSFER_FUNCTION_GAMMA24:
+ degamma_lut_selection = 2;
+ break;
+ case TRANSFER_FUNCTION_GAMMA26:
+ degamma_lut_selection = 3;
+ break;
+ default:
+ pre_degam_en = 0;
+ break;
+ }
+
+ REG_SET_2(PRE_DEGAM, 0,
+ PRE_DEGAM_MODE, pre_degam_en,
+ PRE_DEGAM_SELECT, degamma_lut_selection);
+}
+
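+/* Program the CNVC format converter: surface pixel format, crossbar, alpha
+ * handling and, for YCbCr surfaces, the input CSC selection.
+ */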
+static void dpp3_cnv_setup(
+ struct dpp *dpp_base,
+ enum surface_pixel_format format,
+ enum expansion_mode mode,
+ struct dc_csc_transform input_csc_color_matrix,
+ enum dc_color_space input_color_space,
+ struct cnv_alpha_2bit_lut *alpha_2bit_lut)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ uint32_t pixel_format = 0;
+ uint32_t alpha_en = 1;
+ enum dc_color_space color_space = COLOR_SPACE_SRGB;
+ enum dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS;
+ bool force_disable_cursor = false;
+ uint32_t is_2bit = 0;
+ uint32_t alpha_plane_enable = 0;
+ uint32_t dealpha_en = 0, dealpha_ablnd_en = 0;
+ uint32_t realpha_en = 0, realpha_ablnd_en = 0;
+ uint32_t program_prealpha_dealpha = 0;
+ struct out_csc_color_matrix tbl_entry;
+ int i;
+
+ REG_SET_2(FORMAT_CONTROL, 0,
+ CNVC_BYPASS, 0,
+ FORMAT_EXPANSION_MODE, mode);
+
+ REG_UPDATE(FORMAT_CONTROL, FORMAT_CNV16, 0);
+ REG_UPDATE(FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, 0);
+ REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE, 0);
+ REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE_C, 0);
+
+ REG_UPDATE(FORMAT_CONTROL, FORMAT_CROSSBAR_R, 0);
+ REG_UPDATE(FORMAT_CONTROL, FORMAT_CROSSBAR_G, 1);
+ REG_UPDATE(FORMAT_CONTROL, FORMAT_CROSSBAR_B, 2);
+
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ pixel_format = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ pixel_format = 3;
+ alpha_en = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ pixel_format = 8;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ pixel_format = 10;
+ is_2bit = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ force_disable_cursor = false;
+ pixel_format = 65;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ force_disable_cursor = true;
+ pixel_format = 64;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ force_disable_cursor = true;
+ pixel_format = 67;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ force_disable_cursor = true;
+ pixel_format = 66;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ pixel_format = 22;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ pixel_format = 24;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ pixel_format = 25;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
+ pixel_format = 12;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
+ pixel_format = 112;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
+ pixel_format = 113;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
+ pixel_format = 114;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ is_2bit = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102:
+ pixel_format = 115;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ is_2bit = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
+ pixel_format = 116;
+ alpha_plane_enable = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
+ pixel_format = 116;
+ alpha_plane_enable = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
+ pixel_format = 118;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
+ pixel_format = 119;
+ break;
+ default:
+ break;
+ }
+
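+ /* For formats with a 2-bit alpha channel, program the 4-entry alpha expansion LUT */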
+ if (is_2bit == 1 && alpha_2bit_lut != NULL) {
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0);
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1);
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, alpha_2bit_lut->lut2);
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, alpha_2bit_lut->lut3);
+ }
+
+ REG_SET_2(CNVC_SURFACE_PIXEL_FORMAT, 0,
+ CNVC_SURFACE_PIXEL_FORMAT, pixel_format,
+ CNVC_ALPHA_PLANE_ENABLE, alpha_plane_enable);
+ REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en);
+
+ if (program_prealpha_dealpha) {
+ dealpha_en = 1;
+ realpha_en = 1;
+ }
+ REG_SET_2(PRE_DEALPHA, 0,
+ PRE_DEALPHA_EN, dealpha_en,
+ PRE_DEALPHA_ABLND_EN, dealpha_ablnd_en);
+ REG_SET_2(PRE_REALPHA, 0,
+ PRE_REALPHA_EN, realpha_en,
+ PRE_REALPHA_ABLND_EN, realpha_ablnd_en);
+
+ /* If input adjustment exists, program the ICSC with those values. */
+ if (input_csc_color_matrix.enable_adjustment == true) {
+ for (i = 0; i < 12; i++)
+ tbl_entry.regval[i] = input_csc_color_matrix.matrix[i];
+
+ tbl_entry.color_space = input_color_space;
+
+ if (color_space >= COLOR_SPACE_YCBCR601)
+ select = INPUT_CSC_SELECT_ICSC;
+ else
+ select = INPUT_CSC_SELECT_BYPASS;
+
+ dpp3_program_post_csc(dpp_base, color_space, select,
+ &tbl_entry);
+ } else {
+ dpp3_program_post_csc(dpp_base, color_space, select, NULL);
+ }
+
+ if (force_disable_cursor) {
+ REG_UPDATE(CURSOR_CONTROL,
+ CURSOR_ENABLE, 0);
+ REG_UPDATE(CURSOR0_CONTROL,
+ CUR0_ENABLE, 0);
+ }
+}
+
+#define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19))
+
+void dpp3_set_cursor_attributes(
+ struct dpp *dpp_base,
+ struct dc_cursor_attributes *cursor_attributes)
+{
+ enum dc_cursor_color_format color_format = cursor_attributes->color_format;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ int cur_rom_en = 0;
+
+ if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
+ color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA)
+ cur_rom_en = 1;
+
+ REG_UPDATE_3(CURSOR0_CONTROL,
+ CUR0_MODE, color_format,
+ CUR0_EXPANSION_MODE, 0,
+ CUR0_ROM_EN, cur_rom_en);
+
+ if (color_format == CURSOR_MODE_MONO) {
+ /* todo: clarify what to program these to */
+ REG_UPDATE(CURSOR0_COLOR0,
+ CUR0_COLOR0, 0x00000000);
+ REG_UPDATE(CURSOR0_COLOR1,
+ CUR0_COLOR1, 0xFFFFFFFF);
+ }
+}
+
+
+bool dpp3_get_optimal_number_of_taps(
+ struct dpp *dpp,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps)
+{
+ int num_part_y, num_part_c;
+ int max_taps_y, max_taps_c;
+ int min_taps_y, min_taps_c;
+ enum lb_memory_config lb_config;
+
+ /* Some ASICs do not support FP16 scaling, so we reject modes that require it */
+ if (scl_data->viewport.width != scl_data->h_active &&
+ scl_data->viewport.height != scl_data->v_active &&
+ dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
+ scl_data->format == PIXEL_FORMAT_FP16)
+ return false;
+
+ if (scl_data->viewport.width > scl_data->h_active &&
+ dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
+ scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
+ return false;
+
+ /*
+ * Set default taps if none are provided
+ * From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling
+ * taps = 4 for upscaling
+ */
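+ /* For example, a 3:1 horizontal downscale (ratio 3.0) gives
+ * h_taps = min(2 * 3, 8) = 6.
+ */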
+ if (in_taps->h_taps == 0) {
+ if (dc_fixpt_ceil(scl_data->ratios.horz) > 1)
+ scl_data->taps.h_taps = min(2 * dc_fixpt_ceil(scl_data->ratios.horz), 8);
+ else
+ scl_data->taps.h_taps = 4;
+ } else
+ scl_data->taps.h_taps = in_taps->h_taps;
+ if (in_taps->v_taps == 0) {
+ if (dc_fixpt_ceil(scl_data->ratios.vert) > 1)
+ scl_data->taps.v_taps = min(dc_fixpt_ceil(dc_fixpt_mul_int(scl_data->ratios.vert, 2)), 8);
+ else
+ scl_data->taps.v_taps = 4;
+ } else
+ scl_data->taps.v_taps = in_taps->v_taps;
+ if (in_taps->v_taps_c == 0) {
+ if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 1)
+ scl_data->taps.v_taps_c = min(dc_fixpt_ceil(dc_fixpt_mul_int(scl_data->ratios.vert_c, 2)), 8);
+ else
+ scl_data->taps.v_taps_c = 4;
+ } else
+ scl_data->taps.v_taps_c = in_taps->v_taps_c;
+ if (in_taps->h_taps_c == 0) {
+ if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 1)
+ scl_data->taps.h_taps_c = min(2 * dc_fixpt_ceil(scl_data->ratios.horz_c), 8);
+ else
+ scl_data->taps.h_taps_c = 4;
+ } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1)
+ /* Only 1 and even h_taps_c are supported by hw */
+ scl_data->taps.h_taps_c = in_taps->h_taps_c - 1;
+ else
+ scl_data->taps.h_taps_c = in_taps->h_taps_c;
+
+ /*Ensure we can support the requested number of vtaps*/
+ min_taps_y = dc_fixpt_ceil(scl_data->ratios.vert);
+ min_taps_c = dc_fixpt_ceil(scl_data->ratios.vert_c);
+
+ /* Use LB_MEMORY_CONFIG_3 for 4:2:0 */
+ if ((scl_data->format == PIXEL_FORMAT_420BPP8) || (scl_data->format == PIXEL_FORMAT_420BPP10))
+ lb_config = LB_MEMORY_CONFIG_3;
+ else
+ lb_config = LB_MEMORY_CONFIG_0;
+
+ dpp->caps->dscl_calc_lb_num_partitions(
+ scl_data, lb_config, &num_part_y, &num_part_c);
+
+ /* MAX_V_TAPS = MIN (NUM_LINES - MAX(CEILING(V_RATIO,1)-2, 0), 8) */
+ if (dc_fixpt_ceil(scl_data->ratios.vert) > 2)
+ max_taps_y = num_part_y - (dc_fixpt_ceil(scl_data->ratios.vert) - 2);
+ else
+ max_taps_y = num_part_y;
+
+ if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 2)
+ max_taps_c = num_part_c - (dc_fixpt_ceil(scl_data->ratios.vert_c) - 2);
+ else
+ max_taps_c = num_part_c;
+
+ if (max_taps_y < min_taps_y)
+ return false;
+ else if (max_taps_c < min_taps_c)
+ return false;
+
+ if (scl_data->taps.v_taps > max_taps_y)
+ scl_data->taps.v_taps = max_taps_y;
+
+ if (scl_data->taps.v_taps_c > max_taps_c)
+ scl_data->taps.v_taps_c = max_taps_c;
+
+ if (!dpp->ctx->dc->debug.always_scale) {
+ if (IDENTITY_RATIO(scl_data->ratios.horz))
+ scl_data->taps.h_taps = 1;
+ if (IDENTITY_RATIO(scl_data->ratios.vert))
+ scl_data->taps.v_taps = 1;
+ if (IDENTITY_RATIO(scl_data->ratios.horz_c))
+ scl_data->taps.h_taps_c = 1;
+ if (IDENTITY_RATIO(scl_data->ratios.vert_c))
+ scl_data->taps.v_taps_c = 1;
+ }
+
+ return true;
+}
+
+void dpp3_cnv_set_bias_scale(
+ struct dpp *dpp_base,
+ struct dc_bias_and_scale *bias_and_scale)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_UPDATE(FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, bias_and_scale->bias_red);
+ REG_UPDATE(FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, bias_and_scale->bias_green);
+ REG_UPDATE(FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, bias_and_scale->bias_blue);
+ REG_UPDATE(FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, bias_and_scale->scale_red);
+ REG_UPDATE(FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, bias_and_scale->scale_green);
+ REG_UPDATE(FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, bias_and_scale->scale_blue);
+}
+
+static void dpp3_power_on_blnd_lut(
+ struct dpp *dpp_base,
+ bool power_on)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_SET(CM_MEM_PWR_CTRL, 0,
+ BLNDGAM_MEM_PWR_FORCE, power_on ? 0 : 1);
+}
+
+static void dpp3_configure_blnd_lut(
+ struct dpp *dpp_base,
+ bool is_ram_a)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_UPDATE_2(CM_BLNDGAM_LUT_CONTROL,
+ CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 7,
+ CM_BLNDGAM_LUT_HOST_SEL, is_ram_a ? 0 : 1);
+
+ REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0);
+}
+
+static void dpp3_program_blnd_pwl(
+ struct dpp *dpp_base,
+ const struct pwl_result_data *rgb,
+ uint32_t num)
+{
+ uint32_t i;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg;
+ uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg;
+ uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg;
+
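+ /* If the R, G and B curves are identical a single pass with write mask 7
+ * programs all channels; otherwise each channel is written separately
+ * (mask 4 = R, 2 = G, 1 = B).
+ */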
+ if (is_rgb_equal(rgb, num)) {
+ for (i = 0 ; i < num; i++)
+ REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg);
+ REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red);
+ } else {
+ REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 4);
+ for (i = 0 ; i < num; i++)
+ REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg);
+ REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red);
+
+ REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 2);
+ for (i = 0 ; i < num; i++)
+ REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].green_reg);
+ REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_green);
+
+ REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 1);
+ for (i = 0 ; i < num; i++)
+ REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].blue_reg);
+ REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_blue);
+ }
+}
+
+static void dcn3_dpp_cm_get_reg_field(
+ struct dcn3_dpp *dpp,
+ struct dcn3_xfer_func_reg *reg)
+{
+ reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
+ reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
+
+ reg->shifts.field_region_end = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_B;
+ reg->masks.field_region_end = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_B;
+ reg->shifts.field_region_end_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B;
+ reg->masks.field_region_end_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B;
+ reg->shifts.field_region_end_base = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B;
+ reg->masks.field_region_end_base = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B;
+ reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B;
+ reg->masks.field_region_linear_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B;
+ reg->shifts.exp_region_start = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_B;
+ reg->masks.exp_region_start = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_B;
+ reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B;
+ reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B;
+}
+
+/* program blnd lut RAM A */
+static void dpp3_program_blnd_luta_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ struct dcn3_xfer_func_reg gam_regs;
+
+ dcn3_dpp_cm_get_reg_field(dpp, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMA_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMA_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMA_START_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(CM_BLNDGAM_RAMA_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMA_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(CM_BLNDGAM_RAMA_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMA_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMA_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMA_END_CNTL2_R);
+ gam_regs.region_start = REG(CM_BLNDGAM_RAMA_REGION_0_1);
+ gam_regs.region_end = REG(CM_BLNDGAM_RAMA_REGION_32_33);
+
+ cm_helper_program_gamcor_xfer_func(dpp->base.ctx, params, &gam_regs);
+}
+
+/* program blnd lut RAM B */
+static void dpp3_program_blnd_lutb_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ struct dcn3_xfer_func_reg gam_regs;
+
+ dcn3_dpp_cm_get_reg_field(dpp, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMB_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMB_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMB_START_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(CM_BLNDGAM_RAMB_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMB_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(CM_BLNDGAM_RAMB_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMB_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMB_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMB_END_CNTL2_R);
+ gam_regs.region_start = REG(CM_BLNDGAM_RAMB_REGION_0_1);
+ gam_regs.region_end = REG(CM_BLNDGAM_RAMB_REGION_32_33);
+
+ cm_helper_program_gamcor_xfer_func(dpp->base.ctx, params, &gam_regs);
+}
+
+static enum dc_lut_mode dpp3_get_blndgam_current(struct dpp *dpp_base)
+{
+ enum dc_lut_mode mode;
+ uint32_t mode_current = 0;
+ uint32_t in_use = 0;
+
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
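+	/* MODE_CURRENT distinguishes bypass from RAM mode; SELECT_CURRENT reports
+	 * which RAM (A or B) is active.
+	 */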
+ REG_GET(CM_BLNDGAM_CONTROL,
+ CM_BLNDGAM_MODE_CURRENT, &mode_current);
+ REG_GET(CM_BLNDGAM_CONTROL,
+ CM_BLNDGAM_SELECT_CURRENT, &in_use);
+
+ switch (mode_current) {
+ case 0:
+ case 1:
+ mode = LUT_BYPASS;
+ break;
+
+ case 2:
+ if (in_use == 0)
+ mode = LUT_RAM_A;
+ else
+ mode = LUT_RAM_B;
+ break;
+ default:
+ mode = LUT_BYPASS;
+ break;
+ }
+ return mode;
+}
+
+bool dpp3_program_blnd_lut(
+ struct dpp *dpp_base, const struct pwl_params *params)
+{
+ enum dc_lut_mode current_mode;
+ enum dc_lut_mode next_mode;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ if (params == NULL) {
+ REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_MODE, 0);
+ return false;
+ }
+
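+	/* The blend gamma LUT is double-buffered: program the RAM that is not
+	 * currently in use, then switch to it.
+	 */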
+ current_mode = dpp3_get_blndgam_current(dpp_base);
+ if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_B)
+ next_mode = LUT_RAM_A;
+ else
+ next_mode = LUT_RAM_B;
+
+ dpp3_power_on_blnd_lut(dpp_base, true);
+	dpp3_configure_blnd_lut(dpp_base, next_mode == LUT_RAM_A);
+
+ if (next_mode == LUT_RAM_A)
+ dpp3_program_blnd_luta_settings(dpp_base, params);
+ else
+ dpp3_program_blnd_lutb_settings(dpp_base, params);
+
+ dpp3_program_blnd_pwl(
+ dpp_base, params->rgb_resulted, params->hw_points_num);
+
+ REG_UPDATE_2(CM_BLNDGAM_CONTROL,
+ CM_BLNDGAM_MODE, 2,
+ CM_BLNDGAM_SELECT, next_mode == LUT_RAM_A ? 0 : 1);
+
+ return true;
+}
+
+static void dpp3_program_shaper_lut(
+ struct dpp *dpp_base,
+ const struct pwl_result_data *rgb,
+ uint32_t num)
+{
+ uint32_t i, red, green, blue;
+ uint32_t red_delta, green_delta, blue_delta;
+ uint32_t red_value, green_value, blue_value;
+
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ for (i = 0 ; i < num; i++) {
+
+ red = rgb[i].red_reg;
+ green = rgb[i].green_reg;
+ blue = rgb[i].blue_reg;
+
+ red_delta = rgb[i].delta_red_reg;
+ green_delta = rgb[i].delta_green_reg;
+ blue_delta = rgb[i].delta_blue_reg;
+
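+		/* Each LUT word packs the 10-bit delta to the next point above
+		 * the 14-bit base value.
+		 */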
+ red_value = ((red_delta & 0x3ff) << 14) | (red & 0x3fff);
+ green_value = ((green_delta & 0x3ff) << 14) | (green & 0x3fff);
+ blue_value = ((blue_delta & 0x3ff) << 14) | (blue & 0x3fff);
+
+ REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, red_value);
+ REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, green_value);
+ REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, blue_value);
+ }
+}
+
+static enum dc_lut_mode dpp3_get_shaper_current(struct dpp *dpp_base)
+{
+ enum dc_lut_mode mode;
+ uint32_t state_mode;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_GET(CM_SHAPER_CONTROL,
+ CM_SHAPER_MODE_CURRENT, &state_mode);
+
+ switch (state_mode) {
+ case 0:
+ mode = LUT_BYPASS;
+ break;
+ case 1:
+ mode = LUT_RAM_A;
+ break;
+ case 2:
+ mode = LUT_RAM_B;
+ break;
+ default:
+ mode = LUT_BYPASS;
+ break;
+ }
+ return mode;
+}
+
+static void dpp3_configure_shaper_lut(
+ struct dpp *dpp_base,
+ bool is_ram_a)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK,
+ CM_SHAPER_LUT_WRITE_EN_MASK, 7);
+ REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK,
+			CM_SHAPER_LUT_WRITE_SEL, is_ram_a ? 0 : 1);
+ REG_SET(CM_SHAPER_LUT_INDEX, 0, CM_SHAPER_LUT_INDEX, 0);
+}
+
+/* program shaper RAM A */
+static void dpp3_program_shaper_luta_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ const struct gamma_curve *curve;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_SET_2(CM_SHAPER_RAMA_START_CNTL_B, 0,
+ CM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x,
+ CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
+ REG_SET_2(CM_SHAPER_RAMA_START_CNTL_G, 0,
+ CM_SHAPER_RAMA_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x,
+ CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G, 0);
+ REG_SET_2(CM_SHAPER_RAMA_START_CNTL_R, 0,
+ CM_SHAPER_RAMA_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x,
+ CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R, 0);
+
+ REG_SET_2(CM_SHAPER_RAMA_END_CNTL_B, 0,
+ CM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x,
+ CM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y);
+
+ REG_SET_2(CM_SHAPER_RAMA_END_CNTL_G, 0,
+ CM_SHAPER_RAMA_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x,
+ CM_SHAPER_RAMA_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y);
+
+ REG_SET_2(CM_SHAPER_RAMA_END_CNTL_R, 0,
+ CM_SHAPER_RAMA_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x,
+ CM_SHAPER_RAMA_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y);
+
+ curve = params->arr_curve_points;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_0_1, 0,
+ CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_2_3, 0,
+ CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_4_5, 0,
+ CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_6_7, 0,
+ CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_8_9, 0,
+ CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_10_11, 0,
+ CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_12_13, 0,
+ CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_14_15, 0,
+ CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_16_17, 0,
+ CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_18_19, 0,
+ CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_20_21, 0,
+ CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_22_23, 0,
+ CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_24_25, 0,
+ CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_26_27, 0,
+ CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_28_29, 0,
+ CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_30_31, 0,
+ CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_32_33, 0,
+ CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num);
+}
+
+/* program shaper RAM B */
+static void dpp3_program_shaper_lutb_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ const struct gamma_curve *curve;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_SET_2(CM_SHAPER_RAMB_START_CNTL_B, 0,
+ CM_SHAPER_RAMB_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x,
+ CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B, 0);
+ REG_SET_2(CM_SHAPER_RAMB_START_CNTL_G, 0,
+ CM_SHAPER_RAMB_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x,
+ CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G, 0);
+ REG_SET_2(CM_SHAPER_RAMB_START_CNTL_R, 0,
+ CM_SHAPER_RAMB_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x,
+ CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R, 0);
+
+ REG_SET_2(CM_SHAPER_RAMB_END_CNTL_B, 0,
+ CM_SHAPER_RAMB_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x,
+ CM_SHAPER_RAMB_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y);
+
+ REG_SET_2(CM_SHAPER_RAMB_END_CNTL_G, 0,
+ CM_SHAPER_RAMB_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x,
+ CM_SHAPER_RAMB_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y);
+
+ REG_SET_2(CM_SHAPER_RAMB_END_CNTL_R, 0,
+ CM_SHAPER_RAMB_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x,
+ CM_SHAPER_RAMB_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y);
+
+ curve = params->arr_curve_points;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_0_1, 0,
+ CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_2_3, 0,
+ CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_4_5, 0,
+ CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_6_7, 0,
+ CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_8_9, 0,
+ CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_10_11, 0,
+ CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_12_13, 0,
+ CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_14_15, 0,
+ CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_16_17, 0,
+ CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_18_19, 0,
+ CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_20_21, 0,
+ CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_22_23, 0,
+ CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_24_25, 0,
+ CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_26_27, 0,
+ CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_28_29, 0,
+ CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_30_31, 0,
+ CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_32_33, 0,
+ CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num);
+}
+
+bool dpp3_program_shaper(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ enum dc_lut_mode current_mode;
+ enum dc_lut_mode next_mode;
+
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ if (params == NULL) {
+ REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, 0);
+ return false;
+ }
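+	/* Like the blend gamma LUT, the shaper is double-buffered: write the
+	 * inactive RAM and then flip to it.
+	 */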
+ current_mode = dpp3_get_shaper_current(dpp_base);
+
+ if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
+ next_mode = LUT_RAM_B;
+ else
+ next_mode = LUT_RAM_A;
+
+	dpp3_configure_shaper_lut(dpp_base, next_mode == LUT_RAM_A);
+
+ if (next_mode == LUT_RAM_A)
+ dpp3_program_shaper_luta_settings(dpp_base, params);
+ else
+ dpp3_program_shaper_lutb_settings(dpp_base, params);
+
+ dpp3_program_shaper_lut(
+ dpp_base, params->rgb_resulted, params->hw_points_num);
+
+	REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, next_mode == LUT_RAM_A ? 1 : 2);
+
+	return true;
+}
+
+static enum dc_lut_mode get3dlut_config(
+ struct dpp *dpp_base,
+ bool *is_17x17x17,
+ bool *is_12bits_color_channel)
+{
+ uint32_t i_mode, i_enable_10bits, lut_size;
+ enum dc_lut_mode mode;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
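+	/* CM_3DLUT_30BIT_EN set means 10-bit color components; clear means 12-bit. */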
+ REG_GET(CM_3DLUT_READ_WRITE_CONTROL,
+ CM_3DLUT_30BIT_EN, &i_enable_10bits);
+ REG_GET(CM_3DLUT_MODE,
+ CM_3DLUT_MODE_CURRENT, &i_mode);
+
+ switch (i_mode) {
+ case 0:
+ mode = LUT_BYPASS;
+ break;
+ case 1:
+ mode = LUT_RAM_A;
+ break;
+ case 2:
+ mode = LUT_RAM_B;
+ break;
+ default:
+ mode = LUT_BYPASS;
+ break;
+ }
+	*is_12bits_color_channel = (i_enable_10bits == 0);
+
+	REG_GET(CM_3DLUT_MODE, CM_3DLUT_SIZE, &lut_size);
+
+	*is_17x17x17 = (lut_size == 0);
+
+ return mode;
+}
+/*
+ * select ramA or ramB, or bypass
+ * select color channel size 10 or 12 bits
+ * select 3dlut size 17x17x17 or 9x9x9
+ */
+static void dpp3_set_3dlut_mode(
+ struct dpp *dpp_base,
+ enum dc_lut_mode mode,
+ bool is_color_channel_12bits,
+ bool is_lut_size17x17x17)
+{
+ uint32_t lut_mode;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ if (mode == LUT_BYPASS)
+ lut_mode = 0;
+ else if (mode == LUT_RAM_A)
+ lut_mode = 1;
+ else
+ lut_mode = 2;
+
+ REG_UPDATE_2(CM_3DLUT_MODE,
+ CM_3DLUT_MODE, lut_mode,
+			CM_3DLUT_SIZE, is_lut_size17x17x17 ? 0 : 1);
+}
+
+static void dpp3_select_3dlut_ram(
+ struct dpp *dpp_base,
+ enum dc_lut_mode mode,
+ bool is_color_channel_12bits)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_UPDATE_2(CM_3DLUT_READ_WRITE_CONTROL,
+ CM_3DLUT_RAM_SEL, mode == LUT_RAM_A ? 0 : 1,
+		CM_3DLUT_30BIT_EN, is_color_channel_12bits ? 0 : 1);
+}
+
+static void dpp3_set3dlut_ram12(
+ struct dpp *dpp_base,
+ const struct dc_rgb *lut,
+ uint32_t entries)
+{
+ uint32_t i, red, green, blue, red1, green1, blue1;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
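+	/* 12-bit components are MSB-aligned into 16-bit fields and written two
+	 * entries per register access.
+	 */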
+ for (i = 0 ; i < entries; i += 2) {
+ red = lut[i].red<<4;
+ green = lut[i].green<<4;
+ blue = lut[i].blue<<4;
+ red1 = lut[i+1].red<<4;
+ green1 = lut[i+1].green<<4;
+ blue1 = lut[i+1].blue<<4;
+
+ REG_SET_2(CM_3DLUT_DATA, 0,
+ CM_3DLUT_DATA0, red,
+ CM_3DLUT_DATA1, red1);
+
+ REG_SET_2(CM_3DLUT_DATA, 0,
+ CM_3DLUT_DATA0, green,
+ CM_3DLUT_DATA1, green1);
+
+ REG_SET_2(CM_3DLUT_DATA, 0,
+ CM_3DLUT_DATA0, blue,
+ CM_3DLUT_DATA1, blue1);
+
+ }
+}
+
+/*
+ * load the selected LUT with 10-bit color channels
+ */
+static void dpp3_set3dlut_ram10(
+ struct dpp *dpp_base,
+ const struct dc_rgb *lut,
+ uint32_t entries)
+{
+ uint32_t i, red, green, blue, value;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ for (i = 0; i < entries; i++) {
+ red = lut[i].red;
+ green = lut[i].green;
+ blue = lut[i].blue;
+
+ value = (red<<20) | (green<<10) | blue;
+
+ REG_SET(CM_3DLUT_DATA_30BIT, 0, CM_3DLUT_DATA_30BIT, value);
+	}
+}
+
+static void dpp3_select_3dlut_ram_mask(
+ struct dpp *dpp_base,
+ uint32_t ram_selection_mask)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_UPDATE(CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK,
+ ram_selection_mask);
+ REG_SET(CM_3DLUT_INDEX, 0, CM_3DLUT_INDEX, 0);
+}
+
+bool dpp3_program_3dlut(
+ struct dpp *dpp_base,
+ struct tetrahedral_params *params)
+{
+ enum dc_lut_mode mode;
+ bool is_17x17x17;
+ bool is_12bits_color_channel;
+ struct dc_rgb *lut0;
+ struct dc_rgb *lut1;
+ struct dc_rgb *lut2;
+ struct dc_rgb *lut3;
+ int lut_size0;
+ int lut_size;
+
+ if (params == NULL) {
+ dpp3_set_3dlut_mode(dpp_base, LUT_BYPASS, false, false);
+ return false;
+ }
+ mode = get3dlut_config(dpp_base, &is_17x17x17, &is_12bits_color_channel);
+
+ if (mode == LUT_BYPASS || mode == LUT_RAM_B)
+ mode = LUT_RAM_A;
+ else
+ mode = LUT_RAM_B;
+
+ is_17x17x17 = !params->use_tetrahedral_9;
+ is_12bits_color_channel = params->use_12bits;
+ if (is_17x17x17) {
+ lut0 = params->tetrahedral_17.lut0;
+ lut1 = params->tetrahedral_17.lut1;
+ lut2 = params->tetrahedral_17.lut2;
+ lut3 = params->tetrahedral_17.lut3;
+		lut_size0 = ARRAY_SIZE(params->tetrahedral_17.lut0);
+		lut_size = ARRAY_SIZE(params->tetrahedral_17.lut1);
+ } else {
+ lut0 = params->tetrahedral_9.lut0;
+ lut1 = params->tetrahedral_9.lut1;
+ lut2 = params->tetrahedral_9.lut2;
+ lut3 = params->tetrahedral_9.lut3;
+		lut_size0 = ARRAY_SIZE(params->tetrahedral_9.lut0);
+		lut_size = ARRAY_SIZE(params->tetrahedral_9.lut1);
+ }
+
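+	/* The LUT is split across four RAM banks (lut0..lut3); write-enable
+	 * mask bits 0x1, 0x2, 0x4 and 0x8 select which bank receives the
+	 * following writes.
+	 */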
+ dpp3_select_3dlut_ram(dpp_base, mode,
+ is_12bits_color_channel);
+ dpp3_select_3dlut_ram_mask(dpp_base, 0x1);
+ if (is_12bits_color_channel)
+ dpp3_set3dlut_ram12(dpp_base, lut0, lut_size0);
+ else
+ dpp3_set3dlut_ram10(dpp_base, lut0, lut_size0);
+
+ dpp3_select_3dlut_ram_mask(dpp_base, 0x2);
+ if (is_12bits_color_channel)
+ dpp3_set3dlut_ram12(dpp_base, lut1, lut_size);
+ else
+ dpp3_set3dlut_ram10(dpp_base, lut1, lut_size);
+
+ dpp3_select_3dlut_ram_mask(dpp_base, 0x4);
+ if (is_12bits_color_channel)
+ dpp3_set3dlut_ram12(dpp_base, lut2, lut_size);
+ else
+ dpp3_set3dlut_ram10(dpp_base, lut2, lut_size);
+
+ dpp3_select_3dlut_ram_mask(dpp_base, 0x8);
+ if (is_12bits_color_channel)
+ dpp3_set3dlut_ram12(dpp_base, lut3, lut_size);
+ else
+ dpp3_set3dlut_ram10(dpp_base, lut3, lut_size);
+
+ dpp3_set_3dlut_mode(dpp_base, mode, is_12bits_color_channel,
+ is_17x17x17);
+
+ return true;
+}
+
+static struct dpp_funcs dcn30_dpp_funcs = {
+ .dpp_program_gamcor_lut = dpp3_program_gamcor_lut,
+ .dpp_read_state = dpp30_read_state,
+ .dpp_reset = dpp_reset,
+ .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
+ .dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps,
+ .dpp_set_gamut_remap = dpp3_cm_set_gamut_remap,
+ .dpp_set_csc_adjustment = NULL,
+ .dpp_set_csc_default = NULL,
+ .dpp_program_regamma_pwl = NULL,
+ .dpp_set_pre_degam = dpp3_set_pre_degam,
+ .dpp_program_input_lut = NULL,
+ .dpp_full_bypass = dpp1_full_bypass,
+ .dpp_setup = dpp3_cnv_setup,
+ .dpp_program_degamma_pwl = NULL,
+ .dpp_program_cm_dealpha = dpp3_program_cm_dealpha,
+ .dpp_program_cm_bias = dpp3_program_cm_bias,
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ .dpp_program_blnd_lut = dpp3_program_blnd_lut,
+ .dpp_program_shaper_lut = dpp3_program_shaper,
+ .dpp_program_3dlut = dpp3_program_3dlut,
+#else
+ .dpp_program_blnd_lut = NULL,
+ .dpp_program_shaper_lut = NULL,
+ .dpp_program_3dlut = NULL,
+#endif
+	.dpp_program_bias_and_scale = NULL,
+ .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer,
+ .set_cursor_attributes = dpp3_set_cursor_attributes,
+ .set_cursor_position = dpp1_set_cursor_position,
+ .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
+ .dpp_dppclk_control = dpp1_dppclk_control,
+ .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
+};
+
+static struct dpp_caps dcn30_dpp_cap = {
+ .dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT,
+ .dscl_calc_lb_num_partitions = dscl2_calc_lb_num_partitions,
+};
+
+bool dpp3_construct(
+ struct dcn3_dpp *dpp,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn3_dpp_registers *tf_regs,
+ const struct dcn3_dpp_shift *tf_shift,
+ const struct dcn3_dpp_mask *tf_mask)
+{
+ dpp->base.ctx = ctx;
+
+ dpp->base.inst = inst;
+ dpp->base.funcs = &dcn30_dpp_funcs;
+ dpp->base.caps = &dcn30_dpp_cap;
+
+ dpp->tf_regs = tf_regs;
+ dpp->tf_shift = tf_shift;
+ dpp->tf_mask = tf_mask;
+
+ dpp->lb_pixel_depth_supported =
+ LB_PIXEL_DEPTH_18BPP |
+ LB_PIXEL_DEPTH_24BPP |
+ LB_PIXEL_DEPTH_30BPP;
+
+ dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY;
+ dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/
+
+ return true;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
new file mode 100644
index 000000000000..7f6bedbc1ff1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
@@ -0,0 +1,608 @@
+/* Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN30_DPP_H__
+#define __DCN30_DPP_H__
+
+#include "dcn20/dcn20_dpp.h"
+
+#define TO_DCN30_DPP(dpp)\
+ container_of(dpp, struct dcn3_dpp, base)
+
+#define DPP_REG_LIST_DCN30_COMMON(id)\
+ SRI(CM_DEALPHA, CM, id),\
+ SRI(CM_MEM_PWR_STATUS, CM, id),\
+ SRI(CM_BIAS_CR_R, CM, id),\
+ SRI(CM_BIAS_Y_G_CB_B, CM, id),\
+ SRI(PRE_DEGAM, CNVC_CFG, id),\
+ SRI(CM_GAMCOR_CONTROL, CM, id),\
+ SRI(CM_GAMCOR_LUT_CONTROL, CM, id),\
+ SRI(CM_GAMCOR_LUT_INDEX, CM, id),\
+ SRI(CM_GAMCOR_LUT_DATA, CM, id),\
+ SRI(CM_GAMCOR_RAMB_START_CNTL_B, CM, id),\
+ SRI(CM_GAMCOR_RAMB_START_CNTL_G, CM, id),\
+ SRI(CM_GAMCOR_RAMB_START_CNTL_R, CM, id),\
+ SRI(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B, CM, id),\
+ SRI(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G, CM, id),\
+ SRI(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R, CM, id),\
+ SRI(CM_GAMCOR_RAMB_END_CNTL1_B, CM, id),\
+ SRI(CM_GAMCOR_RAMB_END_CNTL2_B, CM, id),\
+ SRI(CM_GAMCOR_RAMB_END_CNTL1_G, CM, id),\
+ SRI(CM_GAMCOR_RAMB_END_CNTL2_G, CM, id),\
+ SRI(CM_GAMCOR_RAMB_END_CNTL1_R, CM, id),\
+ SRI(CM_GAMCOR_RAMB_END_CNTL2_R, CM, id),\
+ SRI(CM_GAMCOR_RAMB_REGION_0_1, CM, id),\
+ SRI(CM_GAMCOR_RAMB_REGION_32_33, CM, id),\
+ SRI(CM_GAMCOR_RAMB_OFFSET_B, CM, id),\
+ SRI(CM_GAMCOR_RAMB_OFFSET_G, CM, id),\
+ SRI(CM_GAMCOR_RAMB_OFFSET_R, CM, id),\
+ SRI(CM_GAMCOR_RAMB_START_BASE_CNTL_B, CM, id),\
+ SRI(CM_GAMCOR_RAMB_START_BASE_CNTL_G, CM, id),\
+ SRI(CM_GAMCOR_RAMB_START_BASE_CNTL_R, CM, id),\
+ SRI(CM_GAMCOR_RAMA_START_CNTL_B, CM, id),\
+ SRI(CM_GAMCOR_RAMA_START_CNTL_G, CM, id),\
+ SRI(CM_GAMCOR_RAMA_START_CNTL_R, CM, id),\
+ SRI(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B, CM, id),\
+ SRI(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G, CM, id),\
+ SRI(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R, CM, id),\
+ SRI(CM_GAMCOR_RAMA_END_CNTL1_B, CM, id),\
+ SRI(CM_GAMCOR_RAMA_END_CNTL2_B, CM, id),\
+ SRI(CM_GAMCOR_RAMA_END_CNTL1_G, CM, id),\
+ SRI(CM_GAMCOR_RAMA_END_CNTL2_G, CM, id),\
+ SRI(CM_GAMCOR_RAMA_END_CNTL1_R, CM, id),\
+ SRI(CM_GAMCOR_RAMA_END_CNTL2_R, CM, id),\
+ SRI(CM_GAMCOR_RAMA_REGION_0_1, CM, id),\
+ SRI(CM_GAMCOR_RAMA_REGION_32_33, CM, id),\
+ SRI(CM_GAMCOR_RAMA_OFFSET_B, CM, id),\
+ SRI(CM_GAMCOR_RAMA_OFFSET_G, CM, id),\
+ SRI(CM_GAMCOR_RAMA_OFFSET_R, CM, id),\
+ SRI(CM_GAMCOR_RAMA_START_BASE_CNTL_B, CM, id),\
+ SRI(CM_GAMCOR_RAMA_START_BASE_CNTL_G, CM, id),\
+ SRI(CM_GAMCOR_RAMA_START_BASE_CNTL_R, CM, id),\
+ SRI(CM_GAMUT_REMAP_CONTROL, CM, id),\
+ SRI(CM_GAMUT_REMAP_C11_C12, CM, id),\
+ SRI(CM_GAMUT_REMAP_C13_C14, CM, id),\
+ SRI(CM_GAMUT_REMAP_C21_C22, CM, id),\
+ SRI(CM_GAMUT_REMAP_C23_C24, CM, id),\
+ SRI(CM_GAMUT_REMAP_C31_C32, CM, id),\
+ SRI(CM_GAMUT_REMAP_C33_C34, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C11_C12, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C13_C14, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C21_C22, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C23_C24, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C31_C32, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C33_C34, CM, id),\
+ SRI(DSCL_EXT_OVERSCAN_LEFT_RIGHT, DSCL, id), \
+ SRI(DSCL_EXT_OVERSCAN_TOP_BOTTOM, DSCL, id), \
+ SRI(OTG_H_BLANK, DSCL, id), \
+ SRI(OTG_V_BLANK, DSCL, id), \
+ SRI(SCL_MODE, DSCL, id), \
+ SRI(LB_DATA_FORMAT, DSCL, id), \
+ SRI(LB_MEMORY_CTRL, DSCL, id), \
+ SRI(DSCL_AUTOCAL, DSCL, id), \
+ SRI(SCL_TAP_CONTROL, DSCL, id), \
+ SRI(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \
+ SRI(SCL_COEF_RAM_TAP_DATA, DSCL, id), \
+ SRI(DSCL_2TAP_CONTROL, DSCL, id), \
+ SRI(MPC_SIZE, DSCL, id), \
+ SRI(SCL_HORZ_FILTER_SCALE_RATIO, DSCL, id), \
+ SRI(SCL_VERT_FILTER_SCALE_RATIO, DSCL, id), \
+ SRI(SCL_HORZ_FILTER_SCALE_RATIO_C, DSCL, id), \
+ SRI(SCL_VERT_FILTER_SCALE_RATIO_C, DSCL, id), \
+ SRI(SCL_HORZ_FILTER_INIT, DSCL, id), \
+ SRI(SCL_HORZ_FILTER_INIT_C, DSCL, id), \
+ SRI(SCL_VERT_FILTER_INIT, DSCL, id), \
+ SRI(SCL_VERT_FILTER_INIT_C, DSCL, id), \
+ SRI(RECOUT_START, DSCL, id), \
+ SRI(RECOUT_SIZE, DSCL, id), \
+ SRI(PRE_DEALPHA, CNVC_CFG, id), \
+ SRI(PRE_REALPHA, CNVC_CFG, id), \
+ SRI(PRE_CSC_MODE, CNVC_CFG, id), \
+ SRI(PRE_CSC_C11_C12, CNVC_CFG, id), \
+ SRI(PRE_CSC_C33_C34, CNVC_CFG, id), \
+ SRI(PRE_CSC_B_C11_C12, CNVC_CFG, id), \
+ SRI(PRE_CSC_B_C33_C34, CNVC_CFG, id), \
+ SRI(CM_POST_CSC_CONTROL, CM, id), \
+ SRI(CM_POST_CSC_C11_C12, CM, id), \
+ SRI(CM_POST_CSC_C33_C34, CM, id), \
+ SRI(CM_POST_CSC_B_C11_C12, CM, id), \
+ SRI(CM_POST_CSC_B_C33_C34, CM, id), \
+ SRI(CM_MEM_PWR_CTRL, CM, id), \
+ SRI(CM_CONTROL, CM, id), \
+ SRI(FORMAT_CONTROL, CNVC_CFG, id), \
+ SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
+ SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
+ SRI(CURSOR0_COLOR0, CNVC_CUR, id), \
+ SRI(CURSOR0_COLOR1, CNVC_CUR, id), \
+ SRI(CURSOR0_FP_SCALE_BIAS, CNVC_CUR, id), \
+ SRI(DPP_CONTROL, DPP_TOP, id), \
+ SRI(CM_HDR_MULT_COEF, CM, id), \
+ SRI(CURSOR_CONTROL, CURSOR0_, id), \
+ SRI(ALPHA_2BIT_LUT, CNVC_CFG, id), \
+ SRI(FCNV_FP_BIAS_R, CNVC_CFG, id), \
+ SRI(FCNV_FP_BIAS_G, CNVC_CFG, id), \
+ SRI(FCNV_FP_BIAS_B, CNVC_CFG, id), \
+ SRI(FCNV_FP_SCALE_R, CNVC_CFG, id), \
+ SRI(FCNV_FP_SCALE_G, CNVC_CFG, id), \
+ SRI(FCNV_FP_SCALE_B, CNVC_CFG, id), \
+ SRI(COLOR_KEYER_CONTROL, CNVC_CFG, id), \
+ SRI(COLOR_KEYER_ALPHA, CNVC_CFG, id), \
+ SRI(COLOR_KEYER_RED, CNVC_CFG, id), \
+ SRI(COLOR_KEYER_GREEN, CNVC_CFG, id), \
+ SRI(COLOR_KEYER_BLUE, CNVC_CFG, id), \
+ SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\
+ SRI(DSCL_MEM_PWR_CTRL, DSCL, id)
+
+#define DPP_REG_LIST_DCN30(id)\
+ DPP_REG_LIST_DCN30_COMMON(id), \
+ TF_REG_LIST_DCN20_COMMON(id), \
+ SRI(CM_BLNDGAM_CONTROL, CM, id), \
+ SRI(CM_SHAPER_LUT_DATA, CM, id),\
+ SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B, CM, id),\
+ SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G, CM, id),\
+ SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R, CM, id),\
+ SRI(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B, CM, id),\
+ SRI(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G, CM, id),\
+ SRI(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R, CM, id),\
+ SRI(CM_BLNDGAM_LUT_CONTROL, CM, id)
+
+#define DPP_REG_LIST_SH_MASK_DCN30_COMMON(mask_sh)\
+ TF_SF(CM0_CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, mask_sh),\
+ TF_SF(CM0_CM_DEALPHA, CM_DEALPHA_EN, mask_sh),\
+ TF_SF(CM0_CM_DEALPHA, CM_DEALPHA_ABLND, mask_sh),\
+ TF_SF(CM0_CM_BIAS_CR_R, CM_BIAS_CR_R, mask_sh),\
+ TF_SF(CM0_CM_BIAS_Y_G_CB_B, CM_BIAS_Y_G, mask_sh),\
+ TF_SF(CM0_CM_BIAS_Y_G_CB_B, CM_BIAS_CB_B, mask_sh),\
+ TF_SF(CM0_CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_DIS, mask_sh),\
+ TF_SF(CM0_CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, mask_sh),\
+ TF_SF(CNVC_CFG0_PRE_DEGAM, PRE_DEGAM_MODE, mask_sh),\
+ TF_SF(CNVC_CFG0_PRE_DEGAM, PRE_DEGAM_SELECT, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_MODE, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_PWL_DISABLE, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_LUT_INDEX, CM_GAMCOR_LUT_INDEX, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_LUT_DATA, CM_GAMCOR_LUT_DATA, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_WRITE_COLOR_MASK, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_READ_COLOR_SEL, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_READ_DBG, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_HOST_SEL, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_CONFIG_MODE, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_RAMA_START_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_B, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_RAMA_START_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_RAMA_END_CNTL1_B, CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_RAMA_END_CNTL2_B, CM_GAMCOR_RAMA_EXP_REGION_END_B, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_RAMA_END_CNTL2_B, CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_RAMA_OFFSET_B, CM_GAMCOR_RAMA_OFFSET_B, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C11, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C12, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C13, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C14, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C21, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C22, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C23, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C24, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C31, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C32, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C33, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C34, mask_sh),\
+ TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh),\
+ TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT, mask_sh),\
+ TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM, mask_sh),\
+ TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP, mask_sh),\
+ TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_START, mask_sh),\
+ TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_END, mask_sh),\
+ TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_START, mask_sh),\
+ TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_END, mask_sh),\
+ TF_SF(DSCL0_LB_DATA_FORMAT, INTERLEAVE_EN, mask_sh),\
+ TF2_SF(DSCL0, LB_DATA_FORMAT__ALPHA_EN, mask_sh),\
+ TF_SF(DSCL0_LB_MEMORY_CTRL, MEMORY_CONFIG, mask_sh),\
+ TF_SF(DSCL0_LB_MEMORY_CTRL, LB_MAX_PARTITIONS, mask_sh),\
+ TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_MODE, mask_sh),\
+ TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_NUM_PIPE, mask_sh),\
+ TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_PIPE_ID, mask_sh),\
+ TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS, mask_sh),\
+ TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS, mask_sh),\
+ TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS_C, mask_sh),\
+ TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS_C, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_TAP_PAIR_IDX, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_PHASE, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_FILTER_TYPE, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF_EN, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_HARDCODE_COEF_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_FACTOR, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_HARDCODE_COEF_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_FACTOR, mask_sh),\
+ TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT, mask_sh),\
+ TF_SF(DSCL0_SCL_MODE, DSCL_MODE, mask_sh),\
+ TF_SF(DSCL0_RECOUT_START, RECOUT_START_X, mask_sh),\
+ TF_SF(DSCL0_RECOUT_START, RECOUT_START_Y, mask_sh),\
+ TF_SF(DSCL0_RECOUT_SIZE, RECOUT_WIDTH, mask_sh),\
+ TF_SF(DSCL0_RECOUT_SIZE, RECOUT_HEIGHT, mask_sh),\
+ TF_SF(DSCL0_MPC_SIZE, MPC_WIDTH, mask_sh),\
+ TF_SF(DSCL0_MPC_SIZE, MPC_HEIGHT, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C, SCL_H_SCALE_RATIO_C, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C, SCL_V_SCALE_RATIO_C, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_FRAC, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_INT, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_FRAC_C, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_INT_C, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_FRAC, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_INT, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_FRAC_C, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_INT_C, mask_sh),\
+ TF_SF(DSCL0_SCL_MODE, SCL_CHROMA_COEF_MODE, mask_sh),\
+ TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT_CURRENT, mask_sh), \
+ TF_SF(CNVC_CFG0_PRE_DEALPHA, PRE_DEALPHA_EN, mask_sh), \
+ TF_SF(CNVC_CFG0_PRE_DEALPHA, PRE_DEALPHA_ABLND_EN, mask_sh), \
+ TF_SF(CNVC_CFG0_PRE_REALPHA, PRE_REALPHA_EN, mask_sh), \
+ TF_SF(CNVC_CFG0_PRE_REALPHA, PRE_REALPHA_ABLND_EN, mask_sh), \
+ TF_SF(CNVC_CFG0_PRE_CSC_MODE, PRE_CSC_MODE, mask_sh), \
+ TF_SF(CNVC_CFG0_PRE_CSC_MODE, PRE_CSC_MODE_CURRENT, mask_sh), \
+ TF_SF(CNVC_CFG0_PRE_CSC_C11_C12, PRE_CSC_C11, mask_sh), \
+ TF_SF(CNVC_CFG0_PRE_CSC_C11_C12, PRE_CSC_C12, mask_sh), \
+ TF_SF(CNVC_CFG0_PRE_CSC_C33_C34, PRE_CSC_C33, mask_sh), \
+ TF_SF(CNVC_CFG0_PRE_CSC_C33_C34, PRE_CSC_C34, mask_sh), \
+ TF_SF(CM0_CM_POST_CSC_CONTROL, CM_POST_CSC_MODE, mask_sh), \
+ TF_SF(CM0_CM_POST_CSC_CONTROL, CM_POST_CSC_MODE_CURRENT, mask_sh), \
+ TF_SF(CM0_CM_POST_CSC_C11_C12, CM_POST_CSC_C11, mask_sh), \
+ TF_SF(CM0_CM_POST_CSC_C11_C12, CM_POST_CSC_C12, mask_sh), \
+ TF_SF(CM0_CM_POST_CSC_C33_C34, CM_POST_CSC_C33, mask_sh), \
+ TF_SF(CM0_CM_POST_CSC_C33_C34, CM_POST_CSC_C34, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \
+ TF2_SF(CNVC_CFG0, FORMAT_CONTROL__ALPHA_EN, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \
+ TF_SF(CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT, CNVC_SURFACE_PIXEL_FORMAT, mask_sh), \
+ TF_SF(CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT, CNVC_ALPHA_PLANE_ENABLE, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_MODE, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_EXPANSION_MODE, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, mask_sh), \
+ TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \
+ TF_SF(CM0_CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, mask_sh), \
+ TF_SF(CM0_CM_CONTROL, CM_BYPASS, mask_sh), \
+ TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
+ TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
+ TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
+ TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CNV16, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, CLAMP_POSITIVE, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, CLAMP_POSITIVE_C, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CROSSBAR_R, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CROSSBAR_G, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CROSSBAR_B, mask_sh), \
+ TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, mask_sh), \
+ TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, mask_sh), \
+ TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, mask_sh), \
+ TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, mask_sh), \
+ TF_SF(CNVC_CFG0_FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, mask_sh), \
+ TF_SF(CNVC_CFG0_FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, mask_sh), \
+ TF_SF(CNVC_CFG0_FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, mask_sh), \
+ TF_SF(CNVC_CFG0_FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, mask_sh), \
+ TF_SF(CNVC_CFG0_FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, mask_sh), \
+ TF_SF(CNVC_CFG0_FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, COLOR_KEYER_EN, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, COLOR_KEYER_MODE, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_LOW, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_HIGH, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_RED, COLOR_KEYER_RED_LOW, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_RED, COLOR_KEYER_RED_HIGH, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_LOW, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_HIGH, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_LOW, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_HIGH, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIX_INV_MODE, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIXEL_ALPHA_MOD_EN, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ROM_EN, mask_sh),\
+ TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\
+ TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh)
+
+#define DPP_REG_LIST_SH_MASK_DCN30_UPDATED(mask_sh)\
+ TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE_CURRENT, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_HOST_SEL, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_CONFIG_MODE, mask_sh), \
+ TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE_CURRENT, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_MODE_CURRENT, mask_sh)
+
+#define DPP_REG_LIST_SH_MASK_DCN30(mask_sh)\
+ DPP_REG_LIST_SH_MASK_DCN30_COMMON(mask_sh), \
+ TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh), \
+ DPP_REG_LIST_SH_MASK_DCN30_UPDATED(mask_sh)
+
+#define DPP_REG_FIELD_LIST_DCN3(type) \
+ TF_REG_FIELD_LIST_DCN2_0(type); \
+ type FORMAT_CROSSBAR_R; \
+ type FORMAT_CROSSBAR_G; \
+ type FORMAT_CROSSBAR_B; \
+ type CM_DEALPHA_EN;\
+ type CM_DEALPHA_ABLND;\
+ type CM_BIAS_Y_G;\
+ type CM_BIAS_CB_B;\
+ type CM_BIAS_CR_R;\
+ type GAMCOR_MEM_PWR_DIS; \
+ type GAMCOR_MEM_PWR_FORCE; \
+ type PRE_DEGAM_MODE;\
+ type PRE_DEGAM_SELECT;\
+ type CNVC_ALPHA_PLANE_ENABLE; \
+ type PRE_DEALPHA_EN; \
+ type PRE_DEALPHA_ABLND_EN; \
+ type PRE_REALPHA_EN; \
+ type PRE_REALPHA_ABLND_EN; \
+ type PRE_CSC_MODE; \
+ type PRE_CSC_MODE_CURRENT; \
+ type PRE_CSC_C11; \
+ type PRE_CSC_C12; \
+ type PRE_CSC_C33; \
+ type PRE_CSC_C34; \
+ type CM_POST_CSC_MODE; \
+ type CM_POST_CSC_MODE_CURRENT; \
+ type CM_POST_CSC_C11; \
+ type CM_POST_CSC_C12; \
+ type CM_POST_CSC_C33; \
+ type CM_POST_CSC_C34; \
+ type CM_GAMCOR_MODE; \
+ type CM_GAMCOR_SELECT; \
+ type CM_GAMCOR_PWL_DISABLE; \
+ type CM_GAMCOR_MODE_CURRENT; \
+ type CM_GAMCOR_SELECT_CURRENT; \
+ type CM_GAMCOR_LUT_INDEX; \
+ type CM_GAMCOR_LUT_DATA; \
+ type CM_GAMCOR_LUT_WRITE_COLOR_MASK; \
+ type CM_GAMCOR_LUT_READ_COLOR_SEL; \
+ type CM_GAMCOR_LUT_READ_DBG; \
+ type CM_GAMCOR_LUT_HOST_SEL; \
+ type CM_GAMCOR_LUT_CONFIG_MODE; \
+ type CM_GAMCOR_LUT_STATUS; \
+ type CM_GAMCOR_RAMA_EXP_REGION_START_B; \
+ type CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B; \
+ type CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B; \
+ type CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B; \
+ type CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B; \
+ type CM_GAMCOR_RAMA_EXP_REGION_END_B; \
+ type CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B; \
+ type CM_GAMCOR_RAMA_OFFSET_B; \
+ type CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET; \
+ type CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET; \
+ type CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;\
+ type CM_GAMUT_REMAP_MODE_CURRENT;\
+ type CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_B; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_G; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_R; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_G; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_R; \
+ type CM_BLNDGAM_LUT_WRITE_COLOR_MASK; \
+ type CM_BLNDGAM_LUT_HOST_SEL; \
+ type CM_BLNDGAM_LUT_CONFIG_MODE; \
+ type CM_3DLUT_MODE_CURRENT; \
+ type CM_SHAPER_MODE_CURRENT; \
+ type CM_BLNDGAM_MODE; \
+ type CM_BLNDGAM_MODE_CURRENT; \
+ type CM_BLNDGAM_SELECT_CURRENT; \
+ type CM_BLNDGAM_SELECT; \
+ type GAMCOR_MEM_PWR_STATE
+
+struct dcn3_dpp_shift {
+ DPP_REG_FIELD_LIST_DCN3(uint8_t);
+};
+
+struct dcn3_dpp_mask {
+ DPP_REG_FIELD_LIST_DCN3(uint32_t);
+};
+
+#define DPP_DCN3_REG_VARIABLE_LIST_COMMON \
+ DPP_DCN2_REG_VARIABLE_LIST; \
+ uint32_t CM_MEM_PWR_STATUS;\
+ uint32_t CM_DEALPHA;\
+ uint32_t CM_BIAS_CR_R;\
+ uint32_t CM_BIAS_Y_G_CB_B;\
+ uint32_t PRE_DEGAM;\
+ uint32_t PRE_DEALPHA; \
+ uint32_t PRE_REALPHA; \
+ uint32_t PRE_CSC_MODE; \
+ uint32_t PRE_CSC_C11_C12; \
+ uint32_t PRE_CSC_C33_C34; \
+ uint32_t PRE_CSC_B_C11_C12; \
+ uint32_t PRE_CSC_B_C33_C34; \
+ uint32_t CM_POST_CSC_CONTROL; \
+ uint32_t CM_POST_CSC_C11_C12; \
+ uint32_t CM_POST_CSC_C33_C34; \
+ uint32_t CM_POST_CSC_B_C11_C12; \
+ uint32_t CM_POST_CSC_B_C33_C34; \
+ uint32_t CM_GAMUT_REMAP_B_C11_C12; \
+ uint32_t CM_GAMUT_REMAP_B_C13_C14; \
+ uint32_t CM_GAMUT_REMAP_B_C21_C22; \
+ uint32_t CM_GAMUT_REMAP_B_C23_C24; \
+ uint32_t CM_GAMUT_REMAP_B_C31_C32; \
+ uint32_t CM_GAMUT_REMAP_B_C33_C34; \
+ uint32_t CM_GAMCOR_CONTROL; \
+ uint32_t CM_GAMCOR_LUT_CONTROL; \
+ uint32_t CM_GAMCOR_LUT_INDEX; \
+ uint32_t CM_GAMCOR_LUT_DATA; \
+ uint32_t CM_GAMCOR_RAMB_START_CNTL_B; \
+ uint32_t CM_GAMCOR_RAMB_START_CNTL_G; \
+ uint32_t CM_GAMCOR_RAMB_START_CNTL_R; \
+ uint32_t CM_GAMCOR_RAMB_START_SLOPE_CNTL_B; \
+ uint32_t CM_GAMCOR_RAMB_START_SLOPE_CNTL_G; \
+ uint32_t CM_GAMCOR_RAMB_START_SLOPE_CNTL_R; \
+ uint32_t CM_GAMCOR_RAMB_END_CNTL1_B; \
+ uint32_t CM_GAMCOR_RAMB_END_CNTL2_B; \
+ uint32_t CM_GAMCOR_RAMB_END_CNTL1_G; \
+ uint32_t CM_GAMCOR_RAMB_END_CNTL2_G; \
+ uint32_t CM_GAMCOR_RAMB_END_CNTL1_R; \
+ uint32_t CM_GAMCOR_RAMB_END_CNTL2_R; \
+ uint32_t CM_GAMCOR_RAMB_REGION_0_1; \
+ uint32_t CM_GAMCOR_RAMB_REGION_32_33; \
+ uint32_t CM_GAMCOR_RAMB_OFFSET_B; \
+ uint32_t CM_GAMCOR_RAMB_OFFSET_G; \
+ uint32_t CM_GAMCOR_RAMB_OFFSET_R; \
+ uint32_t CM_GAMCOR_RAMB_START_BASE_CNTL_B; \
+ uint32_t CM_GAMCOR_RAMB_START_BASE_CNTL_G; \
+ uint32_t CM_GAMCOR_RAMB_START_BASE_CNTL_R; \
+ uint32_t CM_GAMCOR_RAMA_START_CNTL_B; \
+ uint32_t CM_GAMCOR_RAMA_START_CNTL_G; \
+ uint32_t CM_GAMCOR_RAMA_START_CNTL_R; \
+ uint32_t CM_GAMCOR_RAMA_START_SLOPE_CNTL_B; \
+ uint32_t CM_GAMCOR_RAMA_START_SLOPE_CNTL_G; \
+ uint32_t CM_GAMCOR_RAMA_START_SLOPE_CNTL_R; \
+ uint32_t CM_GAMCOR_RAMA_END_CNTL1_B; \
+ uint32_t CM_GAMCOR_RAMA_END_CNTL2_B; \
+ uint32_t CM_GAMCOR_RAMA_END_CNTL1_G; \
+ uint32_t CM_GAMCOR_RAMA_END_CNTL2_G; \
+ uint32_t CM_GAMCOR_RAMA_END_CNTL1_R; \
+ uint32_t CM_GAMCOR_RAMA_END_CNTL2_R; \
+ uint32_t CM_GAMCOR_RAMA_REGION_0_1; \
+ uint32_t CM_GAMCOR_RAMA_REGION_32_33; \
+ uint32_t CM_GAMCOR_RAMA_OFFSET_B; \
+ uint32_t CM_GAMCOR_RAMA_OFFSET_G; \
+ uint32_t CM_GAMCOR_RAMA_OFFSET_R; \
+ uint32_t CM_GAMCOR_RAMA_START_BASE_CNTL_B; \
+ uint32_t CM_GAMCOR_RAMA_START_BASE_CNTL_G; \
+ uint32_t CM_GAMCOR_RAMA_START_BASE_CNTL_R; \
+ uint32_t CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B; \
+ uint32_t CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G; \
+ uint32_t CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R; \
+ uint32_t CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B; \
+ uint32_t CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G; \
+ uint32_t CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R; \
+ uint32_t CM_BLNDGAM_LUT_CONTROL
+
+struct dcn3_dpp_registers {
+ DPP_DCN3_REG_VARIABLE_LIST_COMMON;
+};
+
+struct dcn3_dpp {
+ struct dpp base;
+
+ const struct dcn3_dpp_registers *tf_regs;
+ const struct dcn3_dpp_shift *tf_shift;
+ const struct dcn3_dpp_mask *tf_mask;
+
+ const uint16_t *filter_v;
+ const uint16_t *filter_h;
+ const uint16_t *filter_v_c;
+ const uint16_t *filter_h_c;
+ int lb_pixel_depth_supported;
+ int lb_memory_size;
+ int lb_bits_per_entry;
+ bool is_write_to_ram_a_safe;
+ struct scaler_data scl_data;
+ struct pwl_params pwl_data;
+};
+
+bool dpp3_construct(struct dcn3_dpp *dpp3,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn3_dpp_registers *tf_regs,
+ const struct dcn3_dpp_shift *tf_shift,
+ const struct dcn3_dpp_mask *tf_mask);
+
+bool dpp3_program_gamcor_lut(
+ struct dpp *dpp_base, const struct pwl_params *params);
+
+void dpp3_program_CM_dealpha(
+ struct dpp *dpp_base,
+ uint32_t enable, uint32_t additive_blending);
+
+void dpp3_program_CM_bias(
+ struct dpp *dpp_base,
+ struct CM_bias_params *bias_params);
+
+void dpp3_set_hdr_multiplier(
+ struct dpp *dpp_base,
+ uint32_t multiplier);
+
+void dpp3_cm_set_gamut_remap(
+ struct dpp *dpp_base,
+ const struct dpp_grph_csc_adjustment *adjust);
+
+void dpp3_set_pre_degam(struct dpp *dpp_base,
+ uint32_t degamma_lut_selection);
+
+void dpp3_set_cursor_attributes(
+ struct dpp *dpp_base,
+ struct dc_cursor_attributes *cursor_attributes);
+
+void dpp3_program_post_csc(
+ struct dpp *dpp_base,
+ enum dc_color_space color_space,
+ enum dcn10_input_csc_select input_select,
+ const struct out_csc_color_matrix *tbl_entry);
+
+void dpp3_program_cm_bias(
+ struct dpp *dpp_base,
+ struct CM_bias_params *bias_params);
+
+void dpp3_program_cm_dealpha(
+ struct dpp *dpp_base,
+ uint32_t enable, uint32_t additive_blending);
+
+#endif /* __DC_HWSS_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
new file mode 100644
index 000000000000..9ab63c72f21c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
@@ -0,0 +1,410 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "core_types.h"
+#include "reg_helper.h"
+#include "dcn30_dpp.h"
+#include "basics/conversion.h"
+#include "dcn30_cm_common.h"
+
+#define REG(reg)\
+ dpp->tf_regs->reg
+
+#define CTX \
+ dpp->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dpp->tf_shift->field_name, dpp->tf_mask->field_name
+
+static void dpp3_enable_cm_block(
+ struct dpp *dpp_base)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ unsigned int cm_bypass_mode = 0;
+
+ // debug option: put CM in bypass mode
+ if (dpp_base->ctx->dc->debug.cm_in_bypass)
+ cm_bypass_mode = 1;
+
+ REG_UPDATE(CM_CONTROL, CM_BYPASS, cm_bypass_mode);
+}
+
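+/*
+ * Report which gamma-correction LUT is currently active: a mode of 0 means
+ * the block is bypassed, a mode of 2 means the programmable RAM LUT is in
+ * use, and SELECT_CURRENT then distinguishes RAM A from RAM B.
+ */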
+static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base)
+{
+	enum dc_lut_mode mode = LUT_BYPASS;
+ uint32_t state_mode;
+ uint32_t lut_mode;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_GET(CM_GAMCOR_CONTROL,
+ CM_GAMCOR_MODE_CURRENT, &state_mode);
+
+ if (state_mode == 0)
+ mode = LUT_BYPASS;
+
+ if (state_mode == 2) {//Programmable RAM LUT
+ REG_GET(CM_GAMCOR_CONTROL,
+ CM_GAMCOR_SELECT_CURRENT, &lut_mode);
+
+ if (lut_mode == 0)
+ mode = LUT_RAM_A;
+ else
+ mode = LUT_RAM_B;
+ }
+
+ return mode;
+}
+
+static void dpp3_program_gammcor_lut(
+ struct dpp *dpp_base,
+ const struct pwl_result_data *rgb,
+ uint32_t num,
+ bool is_ram_a)
+{
+ uint32_t i;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg;
+ uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg;
+ uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg;
+
+	/* Fill in the LUT with all base values to be used by the PWL module.
+	 * HW auto-increments the LUT index, so the writes can go back-to-back.
+	 */
+ if (is_rgb_equal(rgb, num)) {
+ for (i = 0 ; i < num; i++)
+ REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg);
+
+ REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red);
+
+ } else {
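+		/*
+		 * R, G and B curves differ, so program the LUT one colour
+		 * channel at a time: the write colour mask selects which
+		 * channel the following data writes target, and the LUT index
+		 * is reset to 0 between passes.
+		 */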
+ REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
+ CM_GAMCOR_LUT_WRITE_COLOR_MASK, 4);
+ for (i = 0 ; i < num; i++)
+ REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg);
+
+ REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red);
+
+ REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0);
+
+ REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
+ CM_GAMCOR_LUT_WRITE_COLOR_MASK, 2);
+ for (i = 0 ; i < num; i++)
+ REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].green_reg);
+
+ REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_green);
+
+ REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0);
+
+ REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
+ CM_GAMCOR_LUT_WRITE_COLOR_MASK, 1);
+ for (i = 0 ; i < num; i++)
+ REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].blue_reg);
+
+ REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_blue);
+ }
+}
+
+static void dpp3_power_on_gamcor_lut(
+ struct dpp *dpp_base,
+ bool power_on)
+{
+ uint32_t power_status;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+
+ REG_SET(CM_MEM_PWR_CTRL, 0,
+		GAMCOR_MEM_PWR_DIS, power_on ? 0 : 1);
+
+ REG_GET(CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, &power_status);
+ if (power_status != 0)
+ BREAK_TO_DEBUGGER();
+}
+
+void dpp3_program_cm_dealpha(
+ struct dpp *dpp_base,
+ uint32_t enable, uint32_t additive_blending)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_SET_2(CM_DEALPHA, 0,
+ CM_DEALPHA_EN, enable,
+ CM_DEALPHA_ABLND, additive_blending);
+}
+
+void dpp3_program_cm_bias(
+ struct dpp *dpp_base,
+ struct CM_bias_params *bias_params)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_SET(CM_BIAS_CR_R, 0, CM_BIAS_CR_R, bias_params->cm_bias_cr_r);
+ REG_SET_2(CM_BIAS_Y_G_CB_B, 0,
+ CM_BIAS_Y_G, bias_params->cm_bias_y_g,
+ CM_BIAS_CB_B, bias_params->cm_bias_cb_b);
+}
+
+static void dpp3_gamcor_reg_field(
+ struct dcn3_dpp *dpp,
+ struct dcn3_xfer_func_reg *reg)
+{
+
+ reg->shifts.field_region_start_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B;
+ reg->masks.field_region_start_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B;
+ reg->shifts.field_offset = dpp->tf_shift->CM_GAMCOR_RAMA_OFFSET_B;
+ reg->masks.field_offset = dpp->tf_mask->CM_GAMCOR_RAMA_OFFSET_B;
+
+ reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;
+ reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;
+
+ reg->shifts.field_region_end = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_B;
+ reg->masks.field_region_end = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_B;
+ reg->shifts.field_region_end_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B;
+ reg->masks.field_region_end_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B;
+ reg->shifts.field_region_end_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B;
+ reg->masks.field_region_end_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B;
+ reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B;
+ reg->masks.field_region_linear_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B;
+ reg->shifts.exp_region_start = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_B;
+ reg->masks.exp_region_start = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_B;
+ reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B;
+ reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B;
+}
+
+static void dpp3_configure_gamcor_lut(
+ struct dpp *dpp_base,
+ bool is_ram_a)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
+ CM_GAMCOR_LUT_WRITE_COLOR_MASK, 7);
+ REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
+		CM_GAMCOR_LUT_HOST_SEL, is_ram_a ? 0 : 1);
+ REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0);
+}
+
+
+bool dpp3_program_gamcor_lut(
+ struct dpp *dpp_base, const struct pwl_params *params)
+{
+ enum dc_lut_mode current_mode;
+ enum dc_lut_mode next_mode;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ struct dcn3_xfer_func_reg gam_regs;
+
+ dpp3_enable_cm_block(dpp_base);
+
+ if (params == NULL) { //bypass if we have no pwl data
+ REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 0);
+ return false;
+ }
+ dpp3_power_on_gamcor_lut(dpp_base, true);
+ REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 2);
+
+ current_mode = dpp30_get_gamcor_current(dpp_base);
+ if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
+ next_mode = LUT_RAM_B;
+ else
+ next_mode = LUT_RAM_A;
+
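+	/*
+	 * The LUT is double buffered: program whichever RAM (A or B) is not
+	 * currently in use, then switch to it below so the new curve takes
+	 * effect on the next frame.
+	 */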
+ dpp3_power_on_gamcor_lut(dpp_base, true);
+	dpp3_configure_gamcor_lut(dpp_base, next_mode == LUT_RAM_A);
+
+ if (next_mode == LUT_RAM_B) {
+ gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMB_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMB_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMB_START_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMB_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMB_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMB_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMB_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMB_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMB_END_CNTL2_R);
+ gam_regs.region_start = REG(CM_GAMCOR_RAMB_REGION_0_1);
+ gam_regs.region_end = REG(CM_GAMCOR_RAMB_REGION_32_33);
+ //New registers in DCN3AG/DCN GAMCOR block
+ gam_regs.offset_b = REG(CM_GAMCOR_RAMB_OFFSET_B);
+ gam_regs.offset_g = REG(CM_GAMCOR_RAMB_OFFSET_G);
+ gam_regs.offset_r = REG(CM_GAMCOR_RAMB_OFFSET_R);
+ gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_B);
+ gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_G);
+ gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_R);
+ } else {
+ gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMA_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMA_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMA_START_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMA_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMA_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMA_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMA_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMA_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMA_END_CNTL2_R);
+ gam_regs.region_start = REG(CM_GAMCOR_RAMA_REGION_0_1);
+ gam_regs.region_end = REG(CM_GAMCOR_RAMA_REGION_32_33);
+ //New registers in DCN3AG/DCN GAMCOR block
+ gam_regs.offset_b = REG(CM_GAMCOR_RAMA_OFFSET_B);
+ gam_regs.offset_g = REG(CM_GAMCOR_RAMA_OFFSET_G);
+ gam_regs.offset_r = REG(CM_GAMCOR_RAMA_OFFSET_R);
+ gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_B);
+ gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_G);
+ gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_R);
+ }
+
+ //get register fields
+ dpp3_gamcor_reg_field(dpp, &gam_regs);
+
+ //program register set for LUTA/LUTB
+ cm_helper_program_gamcor_xfer_func(dpp_base->ctx, params, &gam_regs);
+
+ dpp3_program_gammcor_lut(dpp_base, params->rgb_resulted, params->hw_points_num,
+			next_mode == LUT_RAM_A);
+
+ //select Gamma LUT to use for next frame
+ REG_UPDATE(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT, next_mode == LUT_RAM_A ? 0:1);
+
+ return true;
+}
+
+void dpp3_set_hdr_multiplier(
+ struct dpp *dpp_base,
+ uint32_t multiplier)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier);
+}
+
+
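+/*
+ * Program the 3x4 gamut remap matrix into coefficient set A or B (or bypass
+ * when no coefficients are given) and select which set the hardware uses.
+ */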
+static void program_gamut_remap(
+ struct dcn3_dpp *dpp,
+ const uint16_t *regval,
+ int select)
+{
+ uint16_t selection = 0;
+ struct color_matrices_reg gam_regs;
+
+ if (regval == NULL || select == GAMUT_REMAP_BYPASS) {
+ REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, 0);
+ return;
+ }
+ switch (select) {
+ case GAMUT_REMAP_COEFF:
+ selection = 1;
+ break;
+	/* This corresponds to GAMUT_REMAP coefficient set B;
+	 * there are no common coefficient sets in dcn3ag/dcn3.
+	 */
+ case GAMUT_REMAP_COMA_COEFF:
+ selection = 2;
+ break;
+ default:
+ break;
+ }
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+
+ if (select == GAMUT_REMAP_COEFF) {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else if (select == GAMUT_REMAP_COMA_COEFF) {
+
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ }
+ //select coefficient set to use
+ REG_SET(
+ CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, selection);
+}
+
+void dpp3_cm_set_gamut_remap(
+ struct dpp *dpp_base,
+ const struct dpp_grph_csc_adjustment *adjust)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ int i = 0;
+	uint32_t gamut_mode;
+
+ if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
+ /* Bypass if type is bypass or hw */
+ program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS);
+ else {
+ struct fixed31_32 arr_matrix[12];
+ uint16_t arr_reg_val[12];
+
+ for (i = 0; i < 12; i++)
+ arr_matrix[i] = adjust->temperature_matrix[i];
+
+ convert_float_matrix(
+ arr_reg_val, arr_matrix, 12);
+
+ //current coefficient set in use
+ REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &gamut_mode);
+
+ if (gamut_mode == 0)
+ gamut_mode = 1; //use coefficient set A
+ else if (gamut_mode == 1)
+ gamut_mode = 2;
+ else
+ gamut_mode = 1;
+
+ //follow dcn2 approach for now - using only coefficient set A
+ program_gamut_remap(dpp, arr_reg_val, GAMUT_REMAP_COEFF);
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
new file mode 100644
index 000000000000..f14f69616692
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "reg_helper.h"
+#include "resource.h"
+#include "dwb.h"
+#include "dcn30_dwb.h"
+
+
+#define REG(reg)\
+ dwbc30->dwbc_regs->reg
+
+#define CTX \
+ dwbc30->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dwbc30->dwbc_shift->field_name, dwbc30->dwbc_mask->field_name
+
+#define DC_LOGGER \
+ dwbc30->base.ctx->logger
+
+static bool dwb3_get_caps(struct dwbc *dwbc, struct dwb_caps *caps)
+{
+ if (caps) {
+ caps->adapter_id = 0; /* we only support 1 adapter currently */
+ caps->hw_version = DCN_VERSION_3_0;
+ caps->num_pipes = 2;
+ memset(&caps->reserved, 0, sizeof(caps->reserved));
+ memset(&caps->reserved2, 0, sizeof(caps->reserved2));
+ caps->sw_version = dwb_ver_2_0;
+ caps->caps.support_dwb = true;
+ caps->caps.support_ogam = true;
+ caps->caps.support_wbscl = true;
+ caps->caps.support_ocsc = false;
+ caps->caps.support_stereo = true;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+void dwb3_config_fc(struct dwbc *dwbc, struct dc_dwb_params *params)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+
+ /* Set DWB source size */
+ REG_UPDATE_2(FC_SOURCE_SIZE, FC_SOURCE_WIDTH, params->cnv_params.src_width,
+ FC_SOURCE_HEIGHT, params->cnv_params.src_height);
+
+	/* If the source size does not match the capture window, enable cropping. */
+ if (params->cnv_params.crop_en) {
+ REG_UPDATE(FC_MODE_CTRL, FC_WINDOW_CROP_EN, 1);
+ REG_UPDATE(FC_WINDOW_START, FC_WINDOW_START_X, params->cnv_params.crop_x);
+ REG_UPDATE(FC_WINDOW_START, FC_WINDOW_START_Y, params->cnv_params.crop_y);
+ REG_UPDATE(FC_WINDOW_SIZE, FC_WINDOW_WIDTH, params->cnv_params.crop_width);
+ REG_UPDATE(FC_WINDOW_SIZE, FC_WINDOW_HEIGHT, params->cnv_params.crop_height);
+ } else {
+ REG_UPDATE(FC_MODE_CTRL, FC_WINDOW_CROP_EN, 0);
+ }
+
+ /* Set CAPTURE_RATE */
+ REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_RATE, params->capture_rate);
+
+ dwb3_set_stereo(dwbc, &params->stereo_params);
+}
+
+bool dwb3_enable(struct dwbc *dwbc, struct dc_dwb_params *params)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+ DC_LOG_DWB("%s dwb3_enabled at inst = %d", __func__, dwbc->inst);
+
+ /* Set WB_ENABLE (not double buffered; capture not enabled) */
+ REG_UPDATE(DWB_ENABLE_CLK_CTRL, DWB_ENABLE, 1);
+
+ /* Set FC parameters */
+ dwb3_config_fc(dwbc, params);
+
+ /* Program color processing unit */
+ dwb3_program_hdr_mult(dwbc, params);
+ dwb3_set_gamut_remap(dwbc, params);
+ dwb3_ogam_set_input_transfer_func(dwbc, params->out_transfer_func);
+
+ /* Program output denorm */
+ dwb3_set_denorm(dwbc, params);
+
+ /* Enable DWB capture enable (double buffered) */
+ REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, DWB_FRAME_CAPTURE_ENABLE);
+
+ /* First pixel count */
+ REG_UPDATE(FC_FLOW_CTRL, FC_FIRST_PIXEL_DELAY_COUNT, 96);
+
+ return true;
+}
+
+bool dwb3_disable(struct dwbc *dwbc)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+
+ /* disable FC */
+ REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, DWB_FRAME_CAPTURE_DISABLE);
+
+ /* disable WB */
+ REG_UPDATE(DWB_ENABLE_CLK_CTRL, DWB_ENABLE, 0);
+
+ DC_LOG_DWB("%s dwb3_disabled at inst = %d", __func__, dwbc->inst);
+ return true;
+}
+
+bool dwb3_update(struct dwbc *dwbc, struct dc_dwb_params *params)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+ unsigned int pre_locked;
+
+ /*
+ * Check if the caller has already locked DWB registers.
+ * If so: assume the caller will unlock, so don't touch the lock.
+ * If not: lock them for this update, then unlock after the
+ * update is complete.
+ */
+ REG_GET(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, &pre_locked);
+ DC_LOG_DWB("%s dwb update, inst = %d", __func__, dwbc->inst);
+
+ if (pre_locked == 0) {
+ /* Lock DWB registers */
+ REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 1);
+ }
+
+ /* Set FC parameters */
+ dwb3_config_fc(dwbc, params);
+
+ /* Program color processing unit */
+ dwb3_program_hdr_mult(dwbc, params);
+ dwb3_set_gamut_remap(dwbc, params);
+ dwb3_ogam_set_input_transfer_func(dwbc, params->out_transfer_func);
+
+ /* Program output denorm */
+ dwb3_set_denorm(dwbc, params);
+
+ if (pre_locked == 0) {
+ /* Unlock DWB registers */
+ REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 0);
+ }
+
+ return true;
+}
+
+bool dwb3_is_enabled(struct dwbc *dwbc)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+ unsigned int dwb_enabled = 0;
+ unsigned int fc_frame_capture_en = 0;
+
+ REG_GET(DWB_ENABLE_CLK_CTRL, DWB_ENABLE, &dwb_enabled);
+ REG_GET(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, &fc_frame_capture_en);
+
+ return ((dwb_enabled != 0) && (fc_frame_capture_en != 0));
+}
+
+void dwb3_set_stereo(struct dwbc *dwbc,
+ struct dwb_stereo_params *stereo_params)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+
+ if (stereo_params->stereo_enabled) {
+ REG_UPDATE(FC_MODE_CTRL, FC_EYE_SELECTION, stereo_params->stereo_eye_select);
+ REG_UPDATE(FC_MODE_CTRL, FC_STEREO_EYE_POLARITY, stereo_params->stereo_polarity);
+ DC_LOG_DWB("%s dwb stereo enabled", __func__);
+ } else {
+ REG_UPDATE(FC_MODE_CTRL, FC_EYE_SELECTION, 0);
+ DC_LOG_DWB("%s dwb stereo disabled", __func__);
+ }
+}
+
+void dwb3_set_new_content(struct dwbc *dwbc,
+ bool is_new_content)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+
+ REG_UPDATE(FC_MODE_CTRL, FC_NEW_CONTENT, is_new_content);
+}
+
+void dwb3_set_denorm(struct dwbc *dwbc, struct dc_dwb_params *params)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+
+ /* Set output format*/
+ REG_UPDATE(DWB_OUT_CTRL, OUT_FORMAT, params->cnv_params.fc_out_format);
+
+ /* Set output denorm */
+ if (params->cnv_params.fc_out_format == DWB_OUT_FORMAT_32BPP_ARGB ||
+ params->cnv_params.fc_out_format == DWB_OUT_FORMAT_32BPP_RGBA) {
+ REG_UPDATE(DWB_OUT_CTRL, OUT_DENORM, params->cnv_params.out_denorm_mode);
+ REG_UPDATE(DWB_OUT_CTRL, OUT_MAX, params->cnv_params.out_max_pix_val);
+ REG_UPDATE(DWB_OUT_CTRL, OUT_MIN, params->cnv_params.out_min_pix_val);
+ }
+}
+
+
+const struct dwbc_funcs dcn30_dwbc_funcs = {
+ .get_caps = dwb3_get_caps,
+ .enable = dwb3_enable,
+ .disable = dwb3_disable,
+ .update = dwb3_update,
+ .is_enabled = dwb3_is_enabled,
+ .set_stereo = dwb3_set_stereo,
+ .set_new_content = dwb3_set_new_content,
+ .dwb_program_output_csc = NULL,
+ .dwb_ogam_set_input_transfer_func = dwb3_ogam_set_input_transfer_func, //TODO: rename
+ .dwb_set_scaler = NULL,
+};
+
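+/*
+ * Illustrative use only (the array and variable names below are hypothetical):
+ * a DCN3.0 resource-creation path would typically allocate one struct
+ * dcn30_dwbc per writeback pipe and wire it up with something like
+ *
+ *	dcn30_dwbc_construct(&dwbc30[i], ctx, &dwbc30_regs[i],
+ *			     &dwbc30_shift, &dwbc30_mask, i);
+ */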
+void dcn30_dwbc_construct(struct dcn30_dwbc *dwbc30,
+ struct dc_context *ctx,
+ const struct dcn30_dwbc_registers *dwbc_regs,
+ const struct dcn30_dwbc_shift *dwbc_shift,
+ const struct dcn30_dwbc_mask *dwbc_mask,
+ int inst)
+{
+ dwbc30->base.ctx = ctx;
+
+ dwbc30->base.inst = inst;
+ dwbc30->base.funcs = &dcn30_dwbc_funcs;
+
+ dwbc30->dwbc_regs = dwbc_regs;
+ dwbc30->dwbc_shift = dwbc_shift;
+ dwbc30->dwbc_mask = dwbc_mask;
+}
+
+void dwb3_set_host_read_rate_control(struct dwbc *dwbc, bool host_read_delay)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+
+	/*
+	 * Set the maximum delay for host read access to the DWBSCL LUT or OGAM
+	 * LUT when there are no idle cycles in the HW pipeline (the programmed
+	 * value is in units of 4 clock cycles).
+	 */
+ REG_UPDATE(DWB_HOST_READ_CONTROL, DWB_HOST_READ_RATE_CONTROL, host_read_delay);
+
+ DC_LOG_DWB("%s dwb3_rate_control at inst = %d", __func__, dwbc->inst);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
new file mode 100644
index 000000000000..1010930cf071
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
@@ -0,0 +1,923 @@
+/* Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DC_DWBC_DCN30_H__
+#define __DC_DWBC_DCN30_H__
+
+#define TO_DCN30_DWBC(dwbc_base) \
+ container_of(dwbc_base, struct dcn30_dwbc, base)
+
+/* DCN */
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+#define BASE(seg) \
+ BASE_INNER(seg)
+
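+/*
+ * Field-name initializers for the shift/mask tables: SF_DWB prefixes the
+ * field define with the block name and instance, while SF_DWB2 uses the bare
+ * register name.
+ */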
+#define SF_DWB(reg_name, block, id, field_name, post_fix)\
+ .field_name = block ## id ## _ ## reg_name ## __ ## field_name ## post_fix
+
+ /* set field name */
+#define SF_DWB2(reg_name, block, id, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+
+#define DWBC_COMMON_REG_LIST_DCN30(inst) \
+ SR(DWB_ENABLE_CLK_CTRL),\
+ SR(DWB_MEM_PWR_CTRL),\
+ SR(FC_MODE_CTRL),\
+ SR(FC_FLOW_CTRL),\
+ SR(FC_WINDOW_START),\
+ SR(FC_WINDOW_SIZE),\
+ SR(FC_SOURCE_SIZE),\
+ SR(DWB_UPDATE_CTRL),\
+ SR(DWB_CRC_CTRL),\
+ SR(DWB_CRC_MASK_R_G),\
+ SR(DWB_CRC_MASK_B_A),\
+ SR(DWB_CRC_VAL_R_G),\
+ SR(DWB_CRC_VAL_B_A),\
+ SR(DWB_OUT_CTRL),\
+ SR(DWB_MMHUBBUB_BACKPRESSURE_CNT_EN),\
+ SR(DWB_MMHUBBUB_BACKPRESSURE_CNT),\
+ SR(DWB_HOST_READ_CONTROL),\
+ SR(DWB_SOFT_RESET),\
+ SR(DWB_HDR_MULT_COEF),\
+ SR(DWB_GAMUT_REMAP_MODE),\
+ SR(DWB_GAMUT_REMAP_COEF_FORMAT),\
+ SR(DWB_GAMUT_REMAPA_C11_C12),\
+ SR(DWB_GAMUT_REMAPA_C13_C14),\
+ SR(DWB_GAMUT_REMAPA_C21_C22),\
+ SR(DWB_GAMUT_REMAPA_C23_C24),\
+ SR(DWB_GAMUT_REMAPA_C31_C32),\
+ SR(DWB_GAMUT_REMAPA_C33_C34),\
+ SR(DWB_GAMUT_REMAPB_C11_C12),\
+ SR(DWB_GAMUT_REMAPB_C13_C14),\
+ SR(DWB_GAMUT_REMAPB_C21_C22),\
+ SR(DWB_GAMUT_REMAPB_C23_C24),\
+ SR(DWB_GAMUT_REMAPB_C31_C32),\
+ SR(DWB_GAMUT_REMAPB_C33_C34),\
+ SR(DWB_OGAM_CONTROL),\
+ SR(DWB_OGAM_LUT_INDEX),\
+ SR(DWB_OGAM_LUT_DATA),\
+ SR(DWB_OGAM_LUT_CONTROL),\
+ SR(DWB_OGAM_RAMA_START_CNTL_B),\
+ SR(DWB_OGAM_RAMA_START_CNTL_G),\
+ SR(DWB_OGAM_RAMA_START_CNTL_R),\
+ SR(DWB_OGAM_RAMA_START_BASE_CNTL_B),\
+ SR(DWB_OGAM_RAMA_START_SLOPE_CNTL_B),\
+ SR(DWB_OGAM_RAMA_START_BASE_CNTL_G),\
+ SR(DWB_OGAM_RAMA_START_SLOPE_CNTL_G),\
+ SR(DWB_OGAM_RAMA_START_BASE_CNTL_R),\
+ SR(DWB_OGAM_RAMA_START_SLOPE_CNTL_R),\
+ SR(DWB_OGAM_RAMA_END_CNTL1_B),\
+ SR(DWB_OGAM_RAMA_END_CNTL2_B),\
+ SR(DWB_OGAM_RAMA_END_CNTL1_G),\
+ SR(DWB_OGAM_RAMA_END_CNTL2_G),\
+ SR(DWB_OGAM_RAMA_END_CNTL1_R),\
+ SR(DWB_OGAM_RAMA_END_CNTL2_R),\
+ SR(DWB_OGAM_RAMA_OFFSET_B),\
+ SR(DWB_OGAM_RAMA_OFFSET_G),\
+ SR(DWB_OGAM_RAMA_OFFSET_R),\
+ SR(DWB_OGAM_RAMA_REGION_0_1),\
+ SR(DWB_OGAM_RAMA_REGION_2_3),\
+ SR(DWB_OGAM_RAMA_REGION_4_5),\
+ SR(DWB_OGAM_RAMA_REGION_6_7),\
+ SR(DWB_OGAM_RAMA_REGION_8_9),\
+ SR(DWB_OGAM_RAMA_REGION_10_11),\
+ SR(DWB_OGAM_RAMA_REGION_12_13),\
+ SR(DWB_OGAM_RAMA_REGION_14_15),\
+ SR(DWB_OGAM_RAMA_REGION_16_17),\
+ SR(DWB_OGAM_RAMA_REGION_18_19),\
+ SR(DWB_OGAM_RAMA_REGION_20_21),\
+ SR(DWB_OGAM_RAMA_REGION_22_23),\
+ SR(DWB_OGAM_RAMA_REGION_24_25),\
+ SR(DWB_OGAM_RAMA_REGION_26_27),\
+ SR(DWB_OGAM_RAMA_REGION_28_29),\
+ SR(DWB_OGAM_RAMA_REGION_30_31),\
+ SR(DWB_OGAM_RAMA_REGION_32_33),\
+ SR(DWB_OGAM_RAMB_START_CNTL_B),\
+ SR(DWB_OGAM_RAMB_START_CNTL_G),\
+ SR(DWB_OGAM_RAMB_START_CNTL_R),\
+ SR(DWB_OGAM_RAMB_START_BASE_CNTL_B),\
+ SR(DWB_OGAM_RAMB_START_SLOPE_CNTL_B),\
+ SR(DWB_OGAM_RAMB_START_BASE_CNTL_G),\
+ SR(DWB_OGAM_RAMB_START_SLOPE_CNTL_G),\
+ SR(DWB_OGAM_RAMB_START_BASE_CNTL_R),\
+ SR(DWB_OGAM_RAMB_START_SLOPE_CNTL_R),\
+ SR(DWB_OGAM_RAMB_END_CNTL1_B),\
+ SR(DWB_OGAM_RAMB_END_CNTL2_B),\
+ SR(DWB_OGAM_RAMB_END_CNTL1_G),\
+ SR(DWB_OGAM_RAMB_END_CNTL2_G),\
+ SR(DWB_OGAM_RAMB_END_CNTL1_R),\
+ SR(DWB_OGAM_RAMB_END_CNTL2_R),\
+ SR(DWB_OGAM_RAMB_OFFSET_B),\
+ SR(DWB_OGAM_RAMB_OFFSET_G),\
+ SR(DWB_OGAM_RAMB_OFFSET_R),\
+ SR(DWB_OGAM_RAMB_REGION_0_1),\
+ SR(DWB_OGAM_RAMB_REGION_2_3),\
+ SR(DWB_OGAM_RAMB_REGION_4_5),\
+ SR(DWB_OGAM_RAMB_REGION_6_7),\
+ SR(DWB_OGAM_RAMB_REGION_8_9),\
+ SR(DWB_OGAM_RAMB_REGION_10_11),\
+ SR(DWB_OGAM_RAMB_REGION_12_13),\
+ SR(DWB_OGAM_RAMB_REGION_14_15),\
+ SR(DWB_OGAM_RAMB_REGION_16_17),\
+ SR(DWB_OGAM_RAMB_REGION_18_19),\
+ SR(DWB_OGAM_RAMB_REGION_20_21),\
+ SR(DWB_OGAM_RAMB_REGION_22_23),\
+ SR(DWB_OGAM_RAMB_REGION_24_25),\
+ SR(DWB_OGAM_RAMB_REGION_26_27),\
+ SR(DWB_OGAM_RAMB_REGION_28_29),\
+ SR(DWB_OGAM_RAMB_REGION_30_31),\
+ SR(DWB_OGAM_RAMB_REGION_32_33)
+
+
+#define DWBC_COMMON_MASK_SH_LIST_DCN30(mask_sh) \
+ SF_DWB2(DWB_ENABLE_CLK_CTRL, DWB_TOP, 0, DWB_ENABLE, mask_sh),\
+ SF_DWB2(DWB_ENABLE_CLK_CTRL, DWB_TOP, 0, DISPCLK_R_DWB_GATE_DIS, mask_sh),\
+ SF_DWB2(DWB_ENABLE_CLK_CTRL, DWB_TOP, 0, DISPCLK_G_DWB_GATE_DIS, mask_sh),\
+ SF_DWB2(DWB_ENABLE_CLK_CTRL, DWB_TOP, 0, DWB_TEST_CLK_SEL, mask_sh),\
+ SF_DWB2(DWB_MEM_PWR_CTRL, DWB_TOP, 0, DWB_OGAM_LUT_MEM_PWR_FORCE, mask_sh),\
+ SF_DWB2(DWB_MEM_PWR_CTRL, DWB_TOP, 0, DWB_OGAM_LUT_MEM_PWR_DIS, mask_sh),\
+ SF_DWB2(DWB_MEM_PWR_CTRL, DWB_TOP, 0, DWB_OGAM_LUT_MEM_PWR_STATE, mask_sh),\
+ SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_FRAME_CAPTURE_EN, mask_sh),\
+ SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_FRAME_CAPTURE_RATE, mask_sh),\
+ SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_WINDOW_CROP_EN, mask_sh),\
+ SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_EYE_SELECTION, mask_sh),\
+ SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_STEREO_EYE_POLARITY, mask_sh),\
+ SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_NEW_CONTENT, mask_sh),\
+ SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_FRAME_CAPTURE_EN_CURRENT, mask_sh),\
+ SF_DWB2(FC_FLOW_CTRL, DWB_TOP, 0, FC_FIRST_PIXEL_DELAY_COUNT, mask_sh),\
+ SF_DWB2(FC_WINDOW_START, DWB_TOP, 0, FC_WINDOW_START_X, mask_sh),\
+ SF_DWB2(FC_WINDOW_START, DWB_TOP, 0, FC_WINDOW_START_Y, mask_sh),\
+ SF_DWB2(FC_WINDOW_SIZE, DWB_TOP, 0, FC_WINDOW_WIDTH, mask_sh),\
+ SF_DWB2(FC_WINDOW_SIZE, DWB_TOP, 0, FC_WINDOW_HEIGHT, mask_sh),\
+ SF_DWB2(FC_SOURCE_SIZE, DWB_TOP, 0, FC_SOURCE_WIDTH, mask_sh),\
+ SF_DWB2(FC_SOURCE_SIZE, DWB_TOP, 0, FC_SOURCE_HEIGHT, mask_sh),\
+ SF_DWB2(DWB_UPDATE_CTRL, DWB_TOP, 0, DWB_UPDATE_LOCK, mask_sh),\
+ SF_DWB2(DWB_UPDATE_CTRL, DWB_TOP, 0, DWB_UPDATE_PENDING, mask_sh),\
+ SF_DWB2(DWB_CRC_CTRL, DWB_TOP, 0, DWB_CRC_EN, mask_sh),\
+ SF_DWB2(DWB_CRC_CTRL, DWB_TOP, 0, DWB_CRC_CONT_EN, mask_sh),\
+ SF_DWB2(DWB_CRC_CTRL, DWB_TOP, 0, DWB_CRC_SRC_SEL, mask_sh),\
+ SF_DWB2(DWB_CRC_MASK_R_G, DWB_TOP, 0, DWB_CRC_RED_MASK, mask_sh),\
+ SF_DWB2(DWB_CRC_MASK_R_G, DWB_TOP, 0, DWB_CRC_GREEN_MASK, mask_sh),\
+ SF_DWB2(DWB_CRC_MASK_B_A, DWB_TOP, 0, DWB_CRC_BLUE_MASK, mask_sh),\
+ SF_DWB2(DWB_CRC_MASK_B_A, DWB_TOP, 0, DWB_CRC_A_MASK, mask_sh),\
+ SF_DWB2(DWB_CRC_VAL_R_G, DWB_TOP, 0, DWB_CRC_SIG_RED, mask_sh),\
+ SF_DWB2(DWB_CRC_VAL_R_G, DWB_TOP, 0, DWB_CRC_SIG_GREEN, mask_sh),\
+ SF_DWB2(DWB_CRC_VAL_B_A, DWB_TOP, 0, DWB_CRC_SIG_BLUE, mask_sh),\
+ SF_DWB2(DWB_CRC_VAL_B_A, DWB_TOP, 0, DWB_CRC_SIG_A, mask_sh),\
+ SF_DWB2(DWB_OUT_CTRL, DWB_TOP, 0, OUT_FORMAT, mask_sh),\
+ SF_DWB2(DWB_OUT_CTRL, DWB_TOP, 0, OUT_DENORM, mask_sh),\
+ SF_DWB2(DWB_OUT_CTRL, DWB_TOP, 0, OUT_MAX, mask_sh),\
+ SF_DWB2(DWB_OUT_CTRL, DWB_TOP, 0, OUT_MIN, mask_sh),\
+ SF_DWB2(DWB_MMHUBBUB_BACKPRESSURE_CNT_EN, DWB_TOP, 0, DWB_MMHUBBUB_BACKPRESSURE_CNT_EN, mask_sh),\
+ SF_DWB2(DWB_MMHUBBUB_BACKPRESSURE_CNT, DWB_TOP, 0, DWB_MMHUBBUB_MAX_BACKPRESSURE, mask_sh),\
+ SF_DWB2(DWB_HOST_READ_CONTROL, DWB_TOP, 0, DWB_HOST_READ_RATE_CONTROL, mask_sh),\
+ SF_DWB2(DWB_SOFT_RESET, DWB_TOP, 0, DWB_SOFT_RESET, mask_sh),\
+ SF_DWB2(DWB_HDR_MULT_COEF, DWBCP, 0, DWB_HDR_MULT_COEF, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAP_MODE, DWBCP, 0, DWB_GAMUT_REMAP_MODE, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAP_MODE, DWBCP, 0, DWB_GAMUT_REMAP_MODE_CURRENT, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAP_COEF_FORMAT, DWBCP, 0, DWB_GAMUT_REMAP_COEF_FORMAT, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPA_C11_C12, DWBCP, 0, DWB_GAMUT_REMAPA_C11, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPA_C11_C12, DWBCP, 0, DWB_GAMUT_REMAPA_C12, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPA_C13_C14, DWBCP, 0, DWB_GAMUT_REMAPA_C13, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPA_C13_C14, DWBCP, 0, DWB_GAMUT_REMAPA_C14, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPA_C21_C22, DWBCP, 0, DWB_GAMUT_REMAPA_C21, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPA_C21_C22, DWBCP, 0, DWB_GAMUT_REMAPA_C22, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPA_C23_C24, DWBCP, 0, DWB_GAMUT_REMAPA_C23, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPA_C23_C24, DWBCP, 0, DWB_GAMUT_REMAPA_C24, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPA_C31_C32, DWBCP, 0, DWB_GAMUT_REMAPA_C31, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPA_C31_C32, DWBCP, 0, DWB_GAMUT_REMAPA_C32, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPA_C33_C34, DWBCP, 0, DWB_GAMUT_REMAPA_C33, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPA_C33_C34, DWBCP, 0, DWB_GAMUT_REMAPA_C34, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPB_C11_C12, DWBCP, 0, DWB_GAMUT_REMAPB_C11, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPB_C11_C12, DWBCP, 0, DWB_GAMUT_REMAPB_C12, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPB_C13_C14, DWBCP, 0, DWB_GAMUT_REMAPB_C13, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPB_C13_C14, DWBCP, 0, DWB_GAMUT_REMAPB_C14, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPB_C21_C22, DWBCP, 0, DWB_GAMUT_REMAPB_C21, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPB_C21_C22, DWBCP, 0, DWB_GAMUT_REMAPB_C22, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPB_C23_C24, DWBCP, 0, DWB_GAMUT_REMAPB_C23, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPB_C23_C24, DWBCP, 0, DWB_GAMUT_REMAPB_C24, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPB_C31_C32, DWBCP, 0, DWB_GAMUT_REMAPB_C31, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPB_C31_C32, DWBCP, 0, DWB_GAMUT_REMAPB_C32, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPB_C33_C34, DWBCP, 0, DWB_GAMUT_REMAPB_C33, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPB_C33_C34, DWBCP, 0, DWB_GAMUT_REMAPB_C34, mask_sh),\
+ SF_DWB2(DWB_OGAM_CONTROL, DWBCP, 0, DWB_OGAM_MODE, mask_sh),\
+ SF_DWB2(DWB_OGAM_CONTROL, DWBCP, 0, DWB_OGAM_SELECT, mask_sh),\
+ SF_DWB2(DWB_OGAM_CONTROL, DWBCP, 0, DWB_OGAM_PWL_DISABLE, mask_sh),\
+ SF_DWB2(DWB_OGAM_CONTROL, DWBCP, 0, DWB_OGAM_MODE_CURRENT, mask_sh),\
+ SF_DWB2(DWB_OGAM_CONTROL, DWBCP, 0, DWB_OGAM_SELECT_CURRENT, mask_sh),\
+ SF_DWB2(DWB_OGAM_LUT_INDEX, DWBCP, 0, DWB_OGAM_LUT_INDEX, mask_sh),\
+ SF_DWB2(DWB_OGAM_LUT_DATA, DWBCP, 0, DWB_OGAM_LUT_DATA, mask_sh),\
+ SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_WRITE_COLOR_MASK, mask_sh),\
+ SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_READ_COLOR_SEL, mask_sh),\
+ SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_READ_DBG, mask_sh),\
+ SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_HOST_SEL, mask_sh),\
+ SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_CONFIG_MODE, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_START_CNTL_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_START_CNTL_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_START_CNTL_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_START_CNTL_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_START_CNTL_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_START_CNTL_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_START_BASE_CNTL_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_BASE_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_START_SLOPE_CNTL_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_START_BASE_CNTL_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_BASE_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_START_SLOPE_CNTL_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_START_BASE_CNTL_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_BASE_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_START_SLOPE_CNTL_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_END_CNTL1_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_END_CNTL2_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_END_CNTL2_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_END_CNTL1_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_END_CNTL2_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_END_CNTL2_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_END_CNTL1_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_END_CNTL2_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_END_CNTL2_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_OFFSET_B, DWBCP, 0, DWB_OGAM_RAMA_OFFSET_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_OFFSET_G, DWBCP, 0, DWB_OGAM_RAMA_OFFSET_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_OFFSET_R, DWBCP, 0, DWB_OGAM_RAMA_OFFSET_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION2_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION3_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION4_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION5_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION6_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION7_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION8_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION9_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION10_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION11_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION12_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION13_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION14_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION15_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION16_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION17_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION18_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION19_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION20_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION21_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION22_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION23_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION24_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION25_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION26_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION27_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION28_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION29_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION30_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION31_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION32_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_START_CNTL_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_START_CNTL_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_START_CNTL_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_START_CNTL_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_START_CNTL_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_START_CNTL_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_START_BASE_CNTL_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_BASE_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_START_SLOPE_CNTL_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_START_BASE_CNTL_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_BASE_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_START_SLOPE_CNTL_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_START_BASE_CNTL_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_BASE_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_START_SLOPE_CNTL_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_END_CNTL1_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_END_CNTL2_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_END_CNTL2_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_END_CNTL1_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_END_CNTL2_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_END_CNTL2_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_END_CNTL1_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_END_CNTL2_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_END_CNTL2_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_OFFSET_B, DWBCP, 0, DWB_OGAM_RAMB_OFFSET_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_OFFSET_G, DWBCP, 0, DWB_OGAM_RAMB_OFFSET_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_OFFSET_R, DWBCP, 0, DWB_OGAM_RAMB_OFFSET_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION2_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION3_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION4_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION5_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION6_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION7_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION8_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION9_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION10_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION11_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION12_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION13_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION14_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION15_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION16_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION17_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION18_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION19_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION20_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION21_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION22_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION23_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION24_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION25_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION26_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION27_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION28_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION29_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION30_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION31_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION32_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION33_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS, mask_sh)
+
+
+#define DWBC_REG_FIELD_LIST_DCN3_0(type) \
+ type DWB_ENABLE;\
+ type DISPCLK_R_DWB_GATE_DIS;\
+ type DISPCLK_G_DWB_GATE_DIS;\
+ type DWB_TEST_CLK_SEL;\
+ type DWBSCL_LUT_MEM_PWR_FORCE;\
+ type DWBSCL_LUT_MEM_PWR_DIS;\
+ type DWBSCL_LUT_MEM_PWR_STATE;\
+ type DWBSCL_LB_MEM_PWR_FORCE;\
+ type DWBSCL_LB_MEM_PWR_DIS;\
+ type DWBSCL_LB_MEM_PWR_STATE;\
+ type DWB_OGAM_LUT_MEM_PWR_FORCE;\
+ type DWB_OGAM_LUT_MEM_PWR_DIS;\
+ type DWB_OGAM_LUT_MEM_PWR_STATE;\
+ type FC_FRAME_CAPTURE_EN;\
+ type FC_FRAME_CAPTURE_RATE;\
+ type FC_WINDOW_CROP_EN;\
+ type FC_EYE_SELECTION;\
+ type FC_STEREO_EYE_POLARITY;\
+ type FC_NEW_CONTENT;\
+ type FC_FI_EN;\
+ type FC_FI_PHASE;\
+ type FC_FRAME_CAPTURE_EN_CURRENT;\
+ type FC_FIRST_PIXEL_DELAY_COUNT;\
+ type FC_WINDOW_START_X;\
+ type FC_WINDOW_START_Y;\
+ type FC_WINDOW_WIDTH;\
+ type FC_WINDOW_HEIGHT;\
+ type FC_SOURCE_WIDTH;\
+ type FC_SOURCE_HEIGHT;\
+ type DWB_UPDATE_LOCK;\
+ type DWB_UPDATE_PENDING;\
+ type DWB_CRC_EN;\
+ type DWB_CRC_CONT_EN;\
+ type DWB_CRC_SRC_SEL;\
+ type DWB_CRC_RED_MASK;\
+ type DWB_CRC_GREEN_MASK;\
+ type DWB_CRC_BLUE_MASK;\
+ type DWB_CRC_A_MASK;\
+ type DWB_CRC_SIG_RED;\
+ type DWB_CRC_SIG_GREEN;\
+ type DWB_CRC_SIG_BLUE;\
+ type DWB_CRC_SIG_A;\
+ type OUT_FORMAT;\
+ type OUT_DENORM;\
+ type OUT_MAX;\
+ type OUT_MIN;\
+ type DWB_MMHUBBUB_BACKPRESSURE_CNT_EN;\
+ type DWB_MMHUBBUB_MAX_BACKPRESSURE;\
+ type DWB_HOST_READ_RATE_CONTROL;\
+ type DWBSCL_DATA_OVERFLOW_FLAG;\
+ type DWBSCL_DATA_OVERFLOW_ACK;\
+ type DWBSCL_DATA_OVERFLOW_MASK;\
+ type DWBSCL_DATA_OVERFLOW_INT_STATUS;\
+ type DWBSCL_DATA_OVERFLOW_INT_TYPE;\
+ type DWBSCL_DATA_OVERFLOW_TYPE;\
+ type DWBSCL_DATA_OVERFLOW_OUT_X_CNT;\
+ type DWBSCL_DATA_OVERFLOW_OUT_Y_CNT;\
+ type DWB_SOFT_RESET;\
+ type DWBSCL_COEF_RAM_TAP_PAIR_IDX;\
+ type DWBSCL_COEF_RAM_PHASE;\
+ type DWBSCL_COEF_RAM_FILTER_TYPE;\
+ type DWBSCL_COEF_RAM_SELECT_RD;\
+ type DWBSCL_COEF_RAM_EVEN_TAP_COEF;\
+ type DWBSCL_COEF_RAM_EVEN_TAP_COEF_EN;\
+ type DWBSCL_COEF_RAM_ODD_TAP_COEF;\
+ type DWBSCL_COEF_RAM_ODD_TAP_COEF_EN;\
+ type DWBSCL_MODE;\
+ type DWBSCL_COEF_RAM_SELECT;\
+ type DWBSCL_COEF_RAM_SELECT_CURRENT;\
+ type DWBSCL_H_NUM_OF_TAPS;\
+ type DWBSCL_V_NUM_OF_TAPS;\
+ type DWBSCL_H_SCALE_RATIO;\
+ type DWBSCL_H_INIT_FRAC;\
+ type DWBSCL_H_INIT_INT;\
+ type DWBSCL_V_SCALE_RATIO;\
+ type DWBSCL_V_INIT_FRAC;\
+ type DWBSCL_V_INIT_INT;\
+ type DWBSCL_BOUNDARY_MODE;\
+ type DWBSCL_BLACK_COLOR_RGB;\
+ type DWBSCL_DEST_WIDTH;\
+ type DWBSCL_DEST_HEIGHT;\
+ type DWB_HDR_MULT_COEF;\
+ type DWB_GAMUT_REMAP_MODE;\
+ type DWB_GAMUT_REMAP_MODE_CURRENT;\
+ type DWB_GAMUT_REMAP_COEF_FORMAT;\
+ type DWB_GAMUT_REMAPA_C11;\
+ type DWB_GAMUT_REMAPA_C12;\
+ type DWB_GAMUT_REMAPA_C13;\
+ type DWB_GAMUT_REMAPA_C14;\
+ type DWB_GAMUT_REMAPA_C21;\
+ type DWB_GAMUT_REMAPA_C22;\
+ type DWB_GAMUT_REMAPA_C23;\
+ type DWB_GAMUT_REMAPA_C24;\
+ type DWB_GAMUT_REMAPA_C31;\
+ type DWB_GAMUT_REMAPA_C32;\
+ type DWB_GAMUT_REMAPA_C33;\
+ type DWB_GAMUT_REMAPA_C34;\
+ type DWB_GAMUT_REMAPB_C11;\
+ type DWB_GAMUT_REMAPB_C12;\
+ type DWB_GAMUT_REMAPB_C13;\
+ type DWB_GAMUT_REMAPB_C14;\
+ type DWB_GAMUT_REMAPB_C21;\
+ type DWB_GAMUT_REMAPB_C22;\
+ type DWB_GAMUT_REMAPB_C23;\
+ type DWB_GAMUT_REMAPB_C24;\
+ type DWB_GAMUT_REMAPB_C31;\
+ type DWB_GAMUT_REMAPB_C32;\
+ type DWB_GAMUT_REMAPB_C33;\
+ type DWB_GAMUT_REMAPB_C34;\
+ type DWB_OGAM_MODE;\
+ type DWB_OGAM_SELECT;\
+ type DWB_OGAM_PWL_DISABLE;\
+ type DWB_OGAM_MODE_CURRENT;\
+ type DWB_OGAM_SELECT_CURRENT;\
+ type DWB_OGAM_LUT_INDEX;\
+ type DWB_OGAM_LUT_DATA;\
+ type DWB_OGAM_LUT_WRITE_COLOR_MASK;\
+ type DWB_OGAM_LUT_READ_COLOR_SEL;\
+ type DWB_OGAM_LUT_READ_DBG;\
+ type DWB_OGAM_LUT_HOST_SEL;\
+ type DWB_OGAM_LUT_CONFIG_MODE;\
+ type DWB_OGAM_LUT_STATUS;\
+ type DWB_OGAM_RAMA_EXP_REGION_START_B;\
+ type DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;\
+ type DWB_OGAM_RAMA_EXP_REGION_START_G;\
+ type DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_G;\
+ type DWB_OGAM_RAMA_EXP_REGION_START_R;\
+ type DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_R;\
+ type DWB_OGAM_RAMA_EXP_REGION_START_BASE_B;\
+ type DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B;\
+ type DWB_OGAM_RAMA_EXP_REGION_START_BASE_G;\
+ type DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_G;\
+ type DWB_OGAM_RAMA_EXP_REGION_START_BASE_R;\
+ type DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_R;\
+ type DWB_OGAM_RAMA_EXP_REGION_END_BASE_B;\
+ type DWB_OGAM_RAMA_EXP_REGION_END_B;\
+ type DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_B;\
+ type DWB_OGAM_RAMA_EXP_REGION_END_BASE_G;\
+ type DWB_OGAM_RAMA_EXP_REGION_END_G;\
+ type DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_G;\
+ type DWB_OGAM_RAMA_EXP_REGION_END_BASE_R;\
+ type DWB_OGAM_RAMA_EXP_REGION_END_R;\
+ type DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_R;\
+ type DWB_OGAM_RAMA_OFFSET_B;\
+ type DWB_OGAM_RAMA_OFFSET_G;\
+ type DWB_OGAM_RAMA_OFFSET_R;\
+ type DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION1_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION2_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION3_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION4_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION5_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION6_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION7_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION8_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION9_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION10_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION11_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION12_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION13_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION14_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION15_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION16_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION17_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION18_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION19_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION20_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION21_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION22_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION23_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION24_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION25_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION26_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION27_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION28_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION29_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION30_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION31_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION32_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION33_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION_START_B;\
+ type DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_B;\
+ type DWB_OGAM_RAMB_EXP_REGION_START_G;\
+ type DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_G;\
+ type DWB_OGAM_RAMB_EXP_REGION_START_R;\
+ type DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_R;\
+ type DWB_OGAM_RAMB_EXP_REGION_START_BASE_B;\
+ type DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_B;\
+ type DWB_OGAM_RAMB_EXP_REGION_START_BASE_G;\
+ type DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_G;\
+ type DWB_OGAM_RAMB_EXP_REGION_START_BASE_R;\
+ type DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_R;\
+ type DWB_OGAM_RAMB_EXP_REGION_END_BASE_B;\
+ type DWB_OGAM_RAMB_EXP_REGION_END_B;\
+ type DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_B;\
+ type DWB_OGAM_RAMB_EXP_REGION_END_BASE_G;\
+ type DWB_OGAM_RAMB_EXP_REGION_END_G;\
+ type DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_G;\
+ type DWB_OGAM_RAMB_EXP_REGION_END_BASE_R;\
+ type DWB_OGAM_RAMB_EXP_REGION_END_R;\
+ type DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_R;\
+ type DWB_OGAM_RAMB_OFFSET_B;\
+ type DWB_OGAM_RAMB_OFFSET_G;\
+ type DWB_OGAM_RAMB_OFFSET_R;\
+ type DWB_OGAM_RAMB_EXP_REGION0_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION1_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION2_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION3_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION4_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION5_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION6_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION7_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION8_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION9_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION10_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION11_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION12_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION13_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION14_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION15_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION16_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION17_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION18_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION19_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION20_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION21_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION22_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION23_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION24_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION25_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION26_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION27_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION28_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION29_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION30_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION31_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION32_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION33_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS;
+
+struct dcn30_dwbc_registers {
+ /* DCN3AG */
+ /* DWB_TOP */
+ uint32_t DWB_ENABLE_CLK_CTRL;
+ uint32_t DWB_MEM_PWR_CTRL;
+ uint32_t FC_MODE_CTRL;
+ uint32_t FC_FLOW_CTRL;
+ uint32_t FC_WINDOW_START;
+ uint32_t FC_WINDOW_SIZE;
+ uint32_t FC_SOURCE_SIZE;
+ uint32_t DWB_UPDATE_CTRL;
+ uint32_t DWB_CRC_CTRL;
+ uint32_t DWB_CRC_MASK_R_G;
+ uint32_t DWB_CRC_MASK_B_A;
+ uint32_t DWB_CRC_VAL_R_G;
+ uint32_t DWB_CRC_VAL_B_A;
+ uint32_t DWB_OUT_CTRL;
+ uint32_t DWB_MMHUBBUB_BACKPRESSURE_CNT_EN;
+ uint32_t DWB_MMHUBBUB_BACKPRESSURE_CNT;
+ uint32_t DWB_HOST_READ_CONTROL;
+ uint32_t DWB_SOFT_RESET;
+
+ /* DWBSCL */
+ uint32_t DWBSCL_COEF_RAM_TAP_SELECT;
+ uint32_t DWBSCL_COEF_RAM_TAP_DATA;
+ uint32_t DWBSCL_MODE;
+ uint32_t DWBSCL_TAP_CONTROL;
+ uint32_t DWBSCL_HORZ_FILTER_SCALE_RATIO;
+ uint32_t DWBSCL_HORZ_FILTER_INIT;
+ uint32_t DWBSCL_VERT_FILTER_SCALE_RATIO;
+ uint32_t DWBSCL_VERT_FILTER_INIT;
+ uint32_t DWBSCL_BOUNDARY_CTRL;
+ uint32_t DWBSCL_DEST_SIZE;
+ uint32_t DWBSCL_OVERFLOW_STATUS;
+ uint32_t DWBSCL_OVERFLOW_COUNTER;
+
+ /* DWBCP */
+ uint32_t DWB_HDR_MULT_COEF;
+ uint32_t DWB_GAMUT_REMAP_MODE;
+ uint32_t DWB_GAMUT_REMAP_COEF_FORMAT;
+ uint32_t DWB_GAMUT_REMAPA_C11_C12;
+ uint32_t DWB_GAMUT_REMAPA_C13_C14;
+ uint32_t DWB_GAMUT_REMAPA_C21_C22;
+ uint32_t DWB_GAMUT_REMAPA_C23_C24;
+ uint32_t DWB_GAMUT_REMAPA_C31_C32;
+ uint32_t DWB_GAMUT_REMAPA_C33_C34;
+ uint32_t DWB_GAMUT_REMAPB_C11_C12;
+ uint32_t DWB_GAMUT_REMAPB_C13_C14;
+ uint32_t DWB_GAMUT_REMAPB_C21_C22;
+ uint32_t DWB_GAMUT_REMAPB_C23_C24;
+ uint32_t DWB_GAMUT_REMAPB_C31_C32;
+ uint32_t DWB_GAMUT_REMAPB_C33_C34;
+ uint32_t DWB_OGAM_CONTROL;
+ uint32_t DWB_OGAM_LUT_INDEX;
+ uint32_t DWB_OGAM_LUT_DATA;
+ uint32_t DWB_OGAM_LUT_CONTROL;
+ uint32_t DWB_OGAM_RAMA_START_CNTL_B;
+ uint32_t DWB_OGAM_RAMA_START_CNTL_G;
+ uint32_t DWB_OGAM_RAMA_START_CNTL_R;
+ uint32_t DWB_OGAM_RAMA_START_BASE_CNTL_B;
+ uint32_t DWB_OGAM_RAMA_START_SLOPE_CNTL_B;
+ uint32_t DWB_OGAM_RAMA_START_BASE_CNTL_G;
+ uint32_t DWB_OGAM_RAMA_START_SLOPE_CNTL_G;
+ uint32_t DWB_OGAM_RAMA_START_BASE_CNTL_R;
+ uint32_t DWB_OGAM_RAMA_START_SLOPE_CNTL_R;
+ uint32_t DWB_OGAM_RAMA_END_CNTL1_B;
+ uint32_t DWB_OGAM_RAMA_END_CNTL2_B;
+ uint32_t DWB_OGAM_RAMA_END_CNTL1_G;
+ uint32_t DWB_OGAM_RAMA_END_CNTL2_G;
+ uint32_t DWB_OGAM_RAMA_END_CNTL1_R;
+ uint32_t DWB_OGAM_RAMA_END_CNTL2_R;
+ uint32_t DWB_OGAM_RAMA_OFFSET_B;
+ uint32_t DWB_OGAM_RAMA_OFFSET_G;
+ uint32_t DWB_OGAM_RAMA_OFFSET_R;
+ uint32_t DWB_OGAM_RAMA_REGION_0_1;
+ uint32_t DWB_OGAM_RAMA_REGION_2_3;
+ uint32_t DWB_OGAM_RAMA_REGION_4_5;
+ uint32_t DWB_OGAM_RAMA_REGION_6_7;
+ uint32_t DWB_OGAM_RAMA_REGION_8_9;
+ uint32_t DWB_OGAM_RAMA_REGION_10_11;
+ uint32_t DWB_OGAM_RAMA_REGION_12_13;
+ uint32_t DWB_OGAM_RAMA_REGION_14_15;
+ uint32_t DWB_OGAM_RAMA_REGION_16_17;
+ uint32_t DWB_OGAM_RAMA_REGION_18_19;
+ uint32_t DWB_OGAM_RAMA_REGION_20_21;
+ uint32_t DWB_OGAM_RAMA_REGION_22_23;
+ uint32_t DWB_OGAM_RAMA_REGION_24_25;
+ uint32_t DWB_OGAM_RAMA_REGION_26_27;
+ uint32_t DWB_OGAM_RAMA_REGION_28_29;
+ uint32_t DWB_OGAM_RAMA_REGION_30_31;
+ uint32_t DWB_OGAM_RAMA_REGION_32_33;
+ uint32_t DWB_OGAM_RAMB_START_CNTL_B;
+ uint32_t DWB_OGAM_RAMB_START_CNTL_G;
+ uint32_t DWB_OGAM_RAMB_START_CNTL_R;
+ uint32_t DWB_OGAM_RAMB_START_BASE_CNTL_B;
+ uint32_t DWB_OGAM_RAMB_START_SLOPE_CNTL_B;
+ uint32_t DWB_OGAM_RAMB_START_BASE_CNTL_G;
+ uint32_t DWB_OGAM_RAMB_START_SLOPE_CNTL_G;
+ uint32_t DWB_OGAM_RAMB_START_BASE_CNTL_R;
+ uint32_t DWB_OGAM_RAMB_START_SLOPE_CNTL_R;
+ uint32_t DWB_OGAM_RAMB_END_CNTL1_B;
+ uint32_t DWB_OGAM_RAMB_END_CNTL2_B;
+ uint32_t DWB_OGAM_RAMB_END_CNTL1_G;
+ uint32_t DWB_OGAM_RAMB_END_CNTL2_G;
+ uint32_t DWB_OGAM_RAMB_END_CNTL1_R;
+ uint32_t DWB_OGAM_RAMB_END_CNTL2_R;
+ uint32_t DWB_OGAM_RAMB_OFFSET_B;
+ uint32_t DWB_OGAM_RAMB_OFFSET_G;
+ uint32_t DWB_OGAM_RAMB_OFFSET_R;
+ uint32_t DWB_OGAM_RAMB_REGION_0_1;
+ uint32_t DWB_OGAM_RAMB_REGION_2_3;
+ uint32_t DWB_OGAM_RAMB_REGION_4_5;
+ uint32_t DWB_OGAM_RAMB_REGION_6_7;
+ uint32_t DWB_OGAM_RAMB_REGION_8_9;
+ uint32_t DWB_OGAM_RAMB_REGION_10_11;
+ uint32_t DWB_OGAM_RAMB_REGION_12_13;
+ uint32_t DWB_OGAM_RAMB_REGION_14_15;
+ uint32_t DWB_OGAM_RAMB_REGION_16_17;
+ uint32_t DWB_OGAM_RAMB_REGION_18_19;
+ uint32_t DWB_OGAM_RAMB_REGION_20_21;
+ uint32_t DWB_OGAM_RAMB_REGION_22_23;
+ uint32_t DWB_OGAM_RAMB_REGION_24_25;
+ uint32_t DWB_OGAM_RAMB_REGION_26_27;
+ uint32_t DWB_OGAM_RAMB_REGION_28_29;
+ uint32_t DWB_OGAM_RAMB_REGION_30_31;
+ uint32_t DWB_OGAM_RAMB_REGION_32_33;
+};
+
+/* Internal enums / structs */
+enum dwbscl_coef_filter_type_sel {
+ DWBSCL_COEF_RAM_FILTER_TYPE_VERT_RGB = 0,
+ DWBSCL_COEF_RAM_FILTER_TYPE_HORZ_RGB = 1
+};
+
+
+struct dcn30_dwbc_mask {
+ DWBC_REG_FIELD_LIST_DCN3_0(uint32_t);
+};
+
+struct dcn30_dwbc_shift {
+ DWBC_REG_FIELD_LIST_DCN3_0(uint8_t);
+};
+
+struct dcn30_dwbc {
+ struct dwbc base;
+ const struct dcn30_dwbc_registers *dwbc_regs;
+ const struct dcn30_dwbc_shift *dwbc_shift;
+ const struct dcn30_dwbc_mask *dwbc_mask;
+};
+
+void dcn30_dwbc_construct(struct dcn30_dwbc *dwbc30,
+ struct dc_context *ctx,
+ const struct dcn30_dwbc_registers *dwbc_regs,
+ const struct dcn30_dwbc_shift *dwbc_shift,
+ const struct dcn30_dwbc_mask *dwbc_mask,
+ int inst);
+
+bool dwb3_enable(struct dwbc *dwbc, struct dc_dwb_params *params);
+
+bool dwb3_disable(struct dwbc *dwbc);
+
+bool dwb3_update(struct dwbc *dwbc, struct dc_dwb_params *params);
+
+bool dwb3_is_enabled(struct dwbc *dwbc);
+
+void dwb3_set_stereo(struct dwbc *dwbc,
+ struct dwb_stereo_params *stereo_params);
+
+void dwb3_set_new_content(struct dwbc *dwbc,
+ bool is_new_content);
+
+void dwb3_config_fc(struct dwbc *dwbc,
+ struct dc_dwb_params *params);
+
+void dwb3_set_denorm(struct dwbc *dwbc, struct dc_dwb_params *params);
+
+void dwb3_program_hdr_mult(
+ struct dwbc *dwbc,
+ const struct dc_dwb_params *params);
+
+void dwb3_set_gamut_remap(
+ struct dwbc *dwbc,
+ const struct dc_dwb_params *params);
+
+bool dwb3_ogam_set_input_transfer_func(
+ struct dwbc *dwbc,
+ const struct dc_transfer_func *in_transfer_func_dwb_ogam);
+
+void dwb3_set_host_read_rate_control(struct dwbc *dwbc, bool host_read_delay);
+#endif
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
new file mode 100644
index 000000000000..8593145379d9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "fixed31_32.h"
+#include "resource.h"
+#include "basics/conversion.h"
+#include "dwb.h"
+#include "dcn30_dwb.h"
+#include "dcn30_cm_common.h"
+#include "dcn10/dcn10_cm_common.h"
+
+
+#define REG(reg)\
+ dwbc30->dwbc_regs->reg
+
+#define CTX \
+ dwbc30->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dwbc30->dwbc_shift->field_name, dwbc30->dwbc_mask->field_name
+
+#define TO_DCN30_DWBC(dwbc_base) \
+ container_of(dwbc_base, struct dcn30_dwbc, base)
+
+static void dwb3_get_reg_field_ogam(struct dcn30_dwbc *dwbc30,
+ struct dcn3_xfer_func_reg *reg)
+{
+ reg->shifts.exp_region0_lut_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->masks.exp_region0_lut_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->shifts.exp_region0_num_segments = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->masks.exp_region0_num_segments = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->shifts.exp_region1_lut_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->masks.exp_region1_lut_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->shifts.exp_region1_num_segments = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
+ reg->masks.exp_region1_num_segments = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
+
+ reg->shifts.field_region_end = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_END_B;
+ reg->masks.field_region_end = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_END_B;
+ reg->shifts.field_region_end_slope = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_B;
+ reg->masks.field_region_end_slope = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_B;
+ reg->shifts.field_region_end_base = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_END_BASE_B;
+ reg->masks.field_region_end_base = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_END_BASE_B;
+ reg->shifts.field_region_linear_slope = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B;
+ reg->masks.field_region_linear_slope = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B;
+ reg->masks.field_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_OFFSET_B;
+ reg->shifts.field_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_OFFSET_B;
+ reg->shifts.exp_region_start = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_B;
+ reg->masks.exp_region_start = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_B;
+ reg->shifts.exp_resion_start_segment = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;
+ reg->masks.exp_resion_start_segment = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;
+}
+
+/*program dwb ogam RAM A*/
+static void dwb3_program_ogam_luta_settings(
+ struct dcn30_dwbc *dwbc30,
+ const struct pwl_params *params)
+{
+ struct dcn3_xfer_func_reg gam_regs;
+
+ dwb3_get_reg_field_ogam(dwbc30, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(DWB_OGAM_RAMA_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(DWB_OGAM_RAMA_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(DWB_OGAM_RAMA_START_CNTL_R);
+ gam_regs.start_base_cntl_b = REG(DWB_OGAM_RAMA_START_BASE_CNTL_B);
+ gam_regs.start_base_cntl_g = REG(DWB_OGAM_RAMA_START_BASE_CNTL_G);
+ gam_regs.start_base_cntl_r = REG(DWB_OGAM_RAMA_START_BASE_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(DWB_OGAM_RAMA_START_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(DWB_OGAM_RAMA_START_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(DWB_OGAM_RAMA_START_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(DWB_OGAM_RAMA_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(DWB_OGAM_RAMA_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(DWB_OGAM_RAMA_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(DWB_OGAM_RAMA_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(DWB_OGAM_RAMA_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(DWB_OGAM_RAMA_END_CNTL2_R);
+ gam_regs.offset_b = REG(DWB_OGAM_RAMA_OFFSET_B);
+ gam_regs.offset_g = REG(DWB_OGAM_RAMA_OFFSET_G);
+ gam_regs.offset_r = REG(DWB_OGAM_RAMA_OFFSET_R);
+ gam_regs.region_start = REG(DWB_OGAM_RAMA_REGION_0_1);
+ gam_regs.region_end = REG(DWB_OGAM_RAMA_REGION_32_33);
+ /*todo*/
+ cm_helper_program_gamcor_xfer_func(dwbc30->base.ctx, params, &gam_regs);
+}
+
+/*program dwb ogam RAM B*/
+static void dwb3_program_ogam_lutb_settings(
+ struct dcn30_dwbc *dwbc30,
+ const struct pwl_params *params)
+{
+ struct dcn3_xfer_func_reg gam_regs;
+
+ dwb3_get_reg_field_ogam(dwbc30, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(DWB_OGAM_RAMB_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(DWB_OGAM_RAMB_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(DWB_OGAM_RAMB_START_CNTL_R);
+ gam_regs.start_base_cntl_b = REG(DWB_OGAM_RAMB_START_BASE_CNTL_B);
+ gam_regs.start_base_cntl_g = REG(DWB_OGAM_RAMB_START_BASE_CNTL_G);
+ gam_regs.start_base_cntl_r = REG(DWB_OGAM_RAMB_START_BASE_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(DWB_OGAM_RAMB_START_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(DWB_OGAM_RAMB_START_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(DWB_OGAM_RAMB_START_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(DWB_OGAM_RAMB_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(DWB_OGAM_RAMB_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(DWB_OGAM_RAMB_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(DWB_OGAM_RAMB_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(DWB_OGAM_RAMB_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(DWB_OGAM_RAMB_END_CNTL2_R);
+ gam_regs.offset_b = REG(DWB_OGAM_RAMB_OFFSET_B);
+ gam_regs.offset_g = REG(DWB_OGAM_RAMB_OFFSET_G);
+ gam_regs.offset_r = REG(DWB_OGAM_RAMB_OFFSET_R);
+ gam_regs.region_start = REG(DWB_OGAM_RAMB_REGION_0_1);
+ gam_regs.region_end = REG(DWB_OGAM_RAMB_REGION_32_33);
+
+ cm_helper_program_gamcor_xfer_func(dwbc30->base.ctx, params, &gam_regs);
+}
+
+static enum dc_lut_mode dwb3_get_ogam_current(
+ struct dcn30_dwbc *dwbc30)
+{
+ enum dc_lut_mode mode;
+ uint32_t state_mode;
+ uint32_t ram_select;
+
+ REG_GET(DWB_OGAM_CONTROL,
+ DWB_OGAM_MODE, &state_mode);
+ REG_GET(DWB_OGAM_CONTROL,
+ DWB_OGAM_SELECT, &ram_select);
+
+ if (state_mode == 0) {
+ mode = LUT_BYPASS;
+ } else if (state_mode == 2) {
+ if (ram_select == 0)
+ mode = LUT_RAM_A;
+ else
+ mode = LUT_RAM_B;
+ } else {
+ // Reserved value
+ mode = LUT_BYPASS;
+ BREAK_TO_DEBUGGER();
+ return mode;
+ }
+ return mode;
+}
+
+static void dwb3_configure_ogam_lut(
+ struct dcn30_dwbc *dwbc30,
+ bool is_ram_a)
+{
+ REG_UPDATE(DWB_OGAM_LUT_CONTROL,
+ DWB_OGAM_LUT_READ_COLOR_SEL, 7);
+ REG_UPDATE(DWB_OGAM_CONTROL,
+ DWB_OGAM_SELECT, is_ram_a ? 0 : 1);
+ REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0);
+}
+
+static void dwb3_program_ogam_pwl(struct dcn30_dwbc *dwbc30,
+ const struct pwl_result_data *rgb,
+ uint32_t num)
+{
+ uint32_t i;
+
+ // triple base implementation
+ for (i = 0; i < num/2; i++) {
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].red_reg);
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].green_reg);
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].blue_reg);
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].red_reg);
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].green_reg);
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].blue_reg);
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].red_reg);
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].green_reg);
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].blue_reg);
+ }
+}
+
+static bool dwb3_program_ogam_lut(
+ struct dcn30_dwbc *dwbc30,
+ const struct pwl_params *params)
+{
+ enum dc_lut_mode current_mode;
+ enum dc_lut_mode next_mode;
+
+ if (params == NULL) {
+ REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 0);
+ return false;
+ }
+
+ current_mode = dwb3_get_ogam_current(dwbc30);
+ if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
+ next_mode = LUT_RAM_B;
+ else
+ next_mode = LUT_RAM_A;
+
+ dwb3_configure_ogam_lut(dwbc30, next_mode == LUT_RAM_A);
+
+ if (next_mode == LUT_RAM_A)
+ dwb3_program_ogam_luta_settings(dwbc30, params);
+ else
+ dwb3_program_ogam_lutb_settings(dwbc30, params);
+
+ dwb3_program_ogam_pwl(
+ dwbc30, params->rgb_resulted, params->hw_points_num);
+
+ REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2);
+ REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_SELECT, next_mode == LUT_RAM_A ? 0 : 1);
+
+ return true;
+}
+
+bool dwb3_ogam_set_input_transfer_func(
+ struct dwbc *dwbc,
+ const struct dc_transfer_func *in_transfer_func_dwb_ogam)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+ bool result = false;
+ struct pwl_params *dwb_ogam_lut = NULL;
+
+ if (in_transfer_func_dwb_ogam == NULL)
+ return result;
+
+ dwb_ogam_lut = kzalloc(sizeof(*dwb_ogam_lut), GFP_KERNEL);
+
+ if (dwb_ogam_lut) {
+ cm_helper_translate_curve_to_hw_format(
+ in_transfer_func_dwb_ogam,
+ dwb_ogam_lut, false);
+
+ result = dwb3_program_ogam_lut(
+ dwbc30,
+ dwb_ogam_lut);
+ kfree(dwb_ogam_lut);
+ dwb_ogam_lut = NULL;
+ }
+
+ return result;
+}
+
+static void dwb3_program_gamut_remap(
+ struct dwbc *dwbc,
+ const uint16_t *regval,
+ enum cm_gamut_coef_format coef_format,
+ enum cm_gamut_remap_select select)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+
+ struct color_matrices_reg gam_regs;
+
+ REG_UPDATE(DWB_GAMUT_REMAP_COEF_FORMAT, DWB_GAMUT_REMAP_COEF_FORMAT, coef_format);
+
+ if (regval == NULL || select == CM_GAMUT_REMAP_MODE_BYPASS) {
+ REG_SET(DWB_GAMUT_REMAP_MODE, 0,
+ DWB_GAMUT_REMAP_MODE, 0);
+ return;
+ }
+
+ switch (select) {
+ case CM_GAMUT_REMAP_MODE_RAMA_COEFF:
+ gam_regs.csc_c11_c12 = REG(DWB_GAMUT_REMAPA_C11_C12);
+ gam_regs.csc_c33_c34 = REG(DWB_GAMUT_REMAPA_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dwbc30->base.ctx,
+ regval,
+ &gam_regs);
+ break;
+ case CM_GAMUT_REMAP_MODE_RAMB_COEFF:
+ gam_regs.csc_c11_c12 = REG(DWB_GAMUT_REMAPB_C11_C12);
+ gam_regs.csc_c33_c34 = REG(DWB_GAMUT_REMAPB_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dwbc30->base.ctx,
+ regval,
+ &gam_regs);
+ break;
+ case CM_GAMUT_REMAP_MODE_RESERVED:
+ /* should never happen, bug */
+ BREAK_TO_DEBUGGER();
+ return;
+ default:
+ break;
+ }
+
+ REG_SET(DWB_GAMUT_REMAP_MODE, 0,
+ DWB_GAMUT_REMAP_MODE, select);
+
+}
+
+void dwb3_set_gamut_remap(
+ struct dwbc *dwbc,
+ const struct dc_dwb_params *params)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+ struct cm_grph_csc_adjustment adjust = params->csc_params;
+ int i = 0;
+
+ if (adjust.gamut_adjust_type != CM_GAMUT_ADJUST_TYPE_SW) {
+ /* Bypass if type is bypass or hw */
+ dwb3_program_gamut_remap(dwbc, NULL, adjust.gamut_coef_format, CM_GAMUT_REMAP_MODE_BYPASS);
+ } else {
+ struct fixed31_32 arr_matrix[12];
+ uint16_t arr_reg_val[12];
+ unsigned int current_mode;
+
+ for (i = 0; i < 12; i++)
+ arr_matrix[i] = adjust.temperature_matrix[i];
+
+ convert_float_matrix(arr_reg_val, arr_matrix, 12);
+
+ REG_GET(DWB_GAMUT_REMAP_MODE, DWB_GAMUT_REMAP_MODE_CURRENT, &current_mode);
+
+ if (current_mode == CM_GAMUT_REMAP_MODE_RAMA_COEFF) {
+ dwb3_program_gamut_remap(dwbc, arr_reg_val,
+ adjust.gamut_coef_format, CM_GAMUT_REMAP_MODE_RAMB_COEFF);
+ } else {
+ dwb3_program_gamut_remap(dwbc, arr_reg_val,
+ adjust.gamut_coef_format, CM_GAMUT_REMAP_MODE_RAMA_COEFF);
+ }
+ }
+}
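
As a companion to the matrix handling above (illustrative only, not part of the patch): the 3x4 temperature matrix arrives as fixed31_32 values and is packed into the 16-bit register format by convert_float_matrix() before dwb3_program_gamut_remap() writes it. A minimal sketch, assuming the fixed31_32 constants dc_fixpt_zero and dc_fixpt_one, of producing an identity matrix in that format:

/* Illustrative sketch, not part of the patch. */
static void example_identity_gamut_matrix(uint16_t out_reg[12])
{
	struct fixed31_32 m[12];
	int i;

	/* 3x4 row-major matrix: C11..C14, C21..C24, C31..C34 */
	for (i = 0; i < 12; i++)
		m[i] = dc_fixpt_zero;
	m[0] = dc_fixpt_one;	/* C11 */
	m[5] = dc_fixpt_one;	/* C22 */
	m[10] = dc_fixpt_one;	/* C33 */

	/* Same conversion used by dwb3_set_gamut_remap() above. */
	convert_float_matrix(out_reg, m, 12);
}
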
+
+void dwb3_program_hdr_mult(
+ struct dwbc *dwbc,
+ const struct dc_dwb_params *params)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+
+ REG_UPDATE(DWB_HDR_MULT_COEF, DWB_HDR_MULT_COEF, params->hdr_mult);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c
new file mode 100644
index 000000000000..982732dec133
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dm_services.h"
+#include "reg_helper.h"
+#include "dcn30_hubbub.h"
+
+
+#define CTX \
+ hubbub1->base.ctx
+#define DC_LOGGER \
+ hubbub1->base.ctx->logger
+#define REG(reg)\
+ hubbub1->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hubbub1->shifts->field_name, hubbub1->masks->field_name
+
+#ifdef NUM_VMID
+#undef NUM_VMID
+#endif
+#define NUM_VMID 16
+
+
+static uint32_t convert_and_clamp(
+ uint32_t wm_ns,
+ uint32_t refclk_mhz,
+ uint32_t clamp_value)
+{
+ uint32_t ret_val = 0;
+ ret_val = wm_ns * refclk_mhz;
+ ret_val /= 1000;
+
+ if (ret_val > clamp_value)
+ ret_val = clamp_value;
+
+ return ret_val;
+}
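
A quick worked example of the conversion above (illustrative only, not part of the patch): with a hypothetical 100 MHz DCHUB reference clock, an 800 ns watermark becomes 800 * 100 / 1000 = 80 refclk cycles, and any value that would exceed the clamp saturates at the clamp (0x1fffff is the field maximum used later in this file).

/* Illustrative sketch, not part of the patch; convert_and_clamp() is the
 * static helper defined directly above.
 */
static void example_watermark_conversion(void)
{
	uint32_t refclk_mhz = 100;	/* hypothetical refclk */
	uint32_t clamp = 0x1fffff;	/* watermark field maximum */
	uint32_t cycles;

	cycles = convert_and_clamp(800, refclk_mhz, clamp);	/* -> 80 */
	cycles = convert_and_clamp(30 * 1000 * 1000, refclk_mhz, clamp);	/* -> clamped to 0x1fffff */
	(void)cycles;
}
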
+
+int hubbub3_init_dchub_sys_ctx(struct hubbub *hubbub,
+ struct dcn_hubbub_phys_addr_config *pa_config)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ struct dcn_vmid_page_table_config phys_config;
+
+ REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
+ FB_BASE, pa_config->system_aperture.fb_base >> 24);
+ REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
+ FB_TOP, pa_config->system_aperture.fb_top >> 24);
+ REG_SET(DCN_VM_FB_OFFSET, 0,
+ FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
+ REG_SET(DCN_VM_AGP_BOT, 0,
+ AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
+ REG_SET(DCN_VM_AGP_TOP, 0,
+ AGP_TOP, pa_config->system_aperture.agp_top >> 24);
+ REG_SET(DCN_VM_AGP_BASE, 0,
+ AGP_BASE, pa_config->system_aperture.agp_base >> 24);
+
+ if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
+ phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
+ phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
+ phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
+ phys_config.depth = 0;
+ phys_config.block_size = 0;
+ // Init VMID 0 based on PA config
+ dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
+ }
+
+ return NUM_VMID;
+}
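
To make the granularity above concrete (illustrative only, not part of the patch): the FB/AGP aperture registers drop the low 24 bits of the address (16 MiB units), while the GART page-table start/end registers drop the low 12 bits (4 KiB units). A minimal sketch with hypothetical addresses:

/* Illustrative sketch, not part of the patch. */
static void example_sys_ctx_encoding(void)
{
	uint64_t fb_base  = 0x80000000ULL;	/* hypothetical, 2 GiB */
	uint64_t pt_start = 0x00100000ULL;	/* hypothetical, 1 MiB */

	uint64_t fb_base_field  = fb_base >> 24;	/* 16 MiB units -> 0x80 */
	uint64_t pt_start_field = pt_start >> 12;	/* 4 KiB units -> 0x100 */

	(void)fb_base_field;
	(void)pt_start_field;
}
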
+
+bool hubbub3_program_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ bool wm_pending = false;
+
+ if (hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ /*
+ * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
+ * If the memory controller is fully utilized and the DCHub requestors are
+ * well ahead of their amortized schedule, then it is safe to prevent the next winner
+ * from being committed and sent to the fabric.
+ * The utilization of the memory controller is approximated by ensuring that
+ * the number of outstanding requests is greater than a threshold specified
+ * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule,
+ * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles.
+ *
+ * TODO: Revisit the request limit once the right number is figured out. The request limit for Renoir isn't decided yet, so set the
+ * maximum value (0x1FF) to turn it off for now.
+ */
+ REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
+ DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
+ REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
+ DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);
+
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+
+ return wm_pending;
+}
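
Expanding on the arbiter comment above (illustrative only, not part of the patch): DCHUBBUB_ARB_SAT_LEVEL is set to 60 * refclk_mhz, i.e. a 60 microsecond slack threshold expressed in DLG refclk cycles, while DCHUBBUB_ARB_MIN_REQ_OUTSTAND is pinned at its maximum (0x1FF) so the limiter is effectively disabled for now. The arithmetic, for a hypothetical 100 MHz refclk:

/* Illustrative sketch, not part of the patch. */
static uint32_t example_arb_sat_level(uint32_t refclk_mhz)
{
	/* refclk_mhz cycles elapse per microsecond, so 60 * refclk_mhz
	 * cycles correspond to 60 us of slack; e.g. 100 MHz -> 6000 cycles.
	 */
	return 60 * refclk_mhz;
}
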
+
+bool hubbub3_dcc_support_swizzle(
+ enum swizzle_mode_values swizzle,
+ unsigned int bytes_per_element,
+ enum segment_order *segment_order_horz,
+ enum segment_order *segment_order_vert)
+{
+ bool standard_swizzle = false;
+ bool display_swizzle = false;
+ bool render_swizzle = false;
+
+ switch (swizzle) {
+ case DC_SW_4KB_S:
+ case DC_SW_64KB_S:
+ case DC_SW_VAR_S:
+ case DC_SW_4KB_S_X:
+ case DC_SW_64KB_S_X:
+ case DC_SW_VAR_S_X:
+ standard_swizzle = true;
+ break;
+ case DC_SW_4KB_R:
+ case DC_SW_64KB_R:
+ case DC_SW_VAR_R:
+ case DC_SW_4KB_R_X:
+ case DC_SW_64KB_R_X:
+ case DC_SW_VAR_R_X:
+ render_swizzle = true;
+ break;
+ case DC_SW_4KB_D:
+ case DC_SW_64KB_D:
+ case DC_SW_VAR_D:
+ case DC_SW_4KB_D_X:
+ case DC_SW_64KB_D_X:
+ case DC_SW_VAR_D_X:
+ display_swizzle = true;
+ break;
+ default:
+ break;
+ }
+
+ if (standard_swizzle) {
+ if (bytes_per_element == 1) {
+ *segment_order_horz = segment_order__contiguous;
+ *segment_order_vert = segment_order__na;
+ return true;
+ }
+ if (bytes_per_element == 2) {
+ *segment_order_horz = segment_order__non_contiguous;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ if (bytes_per_element == 4) {
+ *segment_order_horz = segment_order__non_contiguous;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ if (bytes_per_element == 8) {
+ *segment_order_horz = segment_order__na;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ }
+ if (render_swizzle) {
+ if (bytes_per_element == 1) {
+ *segment_order_horz = segment_order__contiguous;
+ *segment_order_vert = segment_order__na;
+ return true;
+ }
+ if (bytes_per_element == 2) {
+ *segment_order_horz = segment_order__non_contiguous;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ if (bytes_per_element == 4) {
+ *segment_order_horz = segment_order__contiguous;
+ *segment_order_vert = segment_order__non_contiguous;
+ return true;
+ }
+ if (bytes_per_element == 8) {
+ *segment_order_horz = segment_order__contiguous;
+ *segment_order_vert = segment_order__non_contiguous;
+ return true;
+ }
+ }
+ if (display_swizzle && bytes_per_element == 8) {
+ *segment_order_horz = segment_order__contiguous;
+ *segment_order_vert = segment_order__non_contiguous;
+ return true;
+ }
+
+ return false;
+}
+
+static void hubbub3_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
+ unsigned int bytes_per_element)
+{
+ /* copied from DML; might want to refactor so this can leverage the DML implementation directly */
+ /* DML : get_blk256_size */
+ if (bytes_per_element == 1) {
+ *blk256_width = 16;
+ *blk256_height = 16;
+ } else if (bytes_per_element == 2) {
+ *blk256_width = 16;
+ *blk256_height = 8;
+ } else if (bytes_per_element == 4) {
+ *blk256_width = 8;
+ *blk256_height = 8;
+ } else if (bytes_per_element == 8) {
+ *blk256_width = 8;
+ *blk256_height = 4;
+ }
+}
+
+static void hubbub3_det_request_size(
+ unsigned int detile_buf_size,
+ unsigned int height,
+ unsigned int width,
+ unsigned int bpe,
+ bool *req128_horz_wc,
+ bool *req128_vert_wc)
+{
+ unsigned int blk256_height = 0;
+ unsigned int blk256_width = 0;
+ unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
+
+ hubbub3_get_blk256_size(&blk256_width, &blk256_height, bpe);
+
+ swath_bytes_horz_wc = width * blk256_height * bpe;
+ swath_bytes_vert_wc = height * blk256_width * bpe;
+
+ *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
+ false : /* full 256B request */
+ true; /* half 128b request */
+
+ *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
+ false : /* full 256B request */
+ true; /* half 128b request */
+}
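
A worked example of the request-size decision above (illustrative only, not part of the patch): for a hypothetical 1920x1080 surface at 2 bytes per element, the 256-byte block is 16x8, so the horizontal write-combine swath is 1920 * 8 * 2 = 30720 bytes. Two such swaths (61440 bytes) fit in the 184 KiB detile buffer configured in hubbub3_construct() at the end of this file, so full 256B requests are used; half 128B requests are chosen only when two swaths no longer fit.

/* Illustrative sketch, not part of the patch; calls the static helper above. */
static void example_det_request_size(void)
{
	bool req128_horz, req128_vert;

	hubbub3_det_request_size(184 * 1024,	/* detile buffer size */
				 1080, 1920,	/* surface height, width */
				 2,		/* bytes per element */
				 &req128_horz, &req128_vert);

	/* 2 * 30720 <= 188416, so req128_horz is false (256B requests). */
}
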
+
+bool hubbub3_get_dcc_compression_cap(struct hubbub *hubbub,
+ const struct dc_dcc_surface_param *input,
+ struct dc_surface_dcc_cap *output)
+{
+ struct dc *dc = hubbub->ctx->dc;
+ /* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
+ enum dcc_control dcc_control;
+ unsigned int bpe;
+ enum segment_order segment_order_horz, segment_order_vert;
+ bool req128_horz_wc, req128_vert_wc;
+
+ memset(output, 0, sizeof(*output));
+
+ if (dc->debug.disable_dcc == DCC_DISABLE)
+ return false;
+
+ if (!hubbub->funcs->dcc_support_pixel_format(input->format,
+ &bpe))
+ return false;
+
+ if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
+ &segment_order_horz, &segment_order_vert))
+ return false;
+
+ hubbub3_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size,
+ input->surface_size.height, input->surface_size.width,
+ bpe, &req128_horz_wc, &req128_vert_wc);
+
+ if (!req128_horz_wc && !req128_vert_wc) {
+ dcc_control = dcc_control__256_256_xxx;
+ } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
+ if (!req128_horz_wc)
+ dcc_control = dcc_control__256_256_xxx;
+ else if (segment_order_horz == segment_order__contiguous)
+ dcc_control = dcc_control__128_128_xxx;
+ else
+ dcc_control = dcc_control__256_64_64;
+ } else if (input->scan == SCAN_DIRECTION_VERTICAL) {
+ if (!req128_vert_wc)
+ dcc_control = dcc_control__256_256_xxx;
+ else if (segment_order_vert == segment_order__contiguous)
+ dcc_control = dcc_control__128_128_xxx;
+ else
+ dcc_control = dcc_control__256_64_64;
+ } else {
+ if ((req128_horz_wc &&
+ segment_order_horz == segment_order__non_contiguous) ||
+ (req128_vert_wc &&
+ segment_order_vert == segment_order__non_contiguous))
+ /* access_dir not known, must use most constraining */
+ dcc_control = dcc_control__256_64_64;
+ else
+ /* req128 is true for either horz or vert,
+ * but segment_order is contiguous
+ */
+ dcc_control = dcc_control__128_128_xxx;
+ }
+
+ /* Exception for 64KB_R_X */
+ if ((bpe == 2) && (input->swizzle_mode == DC_SW_64KB_R_X))
+ dcc_control = dcc_control__128_128_xxx;
+
+ if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
+ dcc_control != dcc_control__256_256_xxx)
+ return false;
+
+ switch (dcc_control) {
+ case dcc_control__256_256_xxx:
+ output->grph.rgb.max_uncompressed_blk_size = 256;
+ output->grph.rgb.max_compressed_blk_size = 256;
+ output->grph.rgb.independent_64b_blks = false;
+ output->grph.rgb.dcc_controls.dcc_256_256_unconstrained = 1;
+ output->grph.rgb.dcc_controls.dcc_256_128_128 = 1;
+ break;
+ case dcc_control__128_128_xxx:
+ output->grph.rgb.max_uncompressed_blk_size = 128;
+ output->grph.rgb.max_compressed_blk_size = 128;
+ output->grph.rgb.independent_64b_blks = false;
+ output->grph.rgb.dcc_controls.dcc_128_128_uncontrained = 1;
+ output->grph.rgb.dcc_controls.dcc_256_128_128 = 1;
+ break;
+ case dcc_control__256_64_64:
+ output->grph.rgb.max_uncompressed_blk_size = 256;
+ output->grph.rgb.max_compressed_blk_size = 64;
+ output->grph.rgb.independent_64b_blks = true;
+ output->grph.rgb.dcc_controls.dcc_256_64_64 = 1;
+ break;
+ case dcc_control__256_128_128:
+ output->grph.rgb.max_uncompressed_blk_size = 256;
+ output->grph.rgb.max_compressed_blk_size = 128;
+ output->grph.rgb.independent_64b_blks = false;
+ output->grph.rgb.dcc_controls.dcc_256_128_128 = 1;
+ break;
+ }
+ output->capable = true;
+ output->const_color_support = true;
+
+ return true;
+}
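
A minimal usage sketch of the capability query above (illustrative only, not part of the patch), assuming a hypothetical 1920x1080 ARGB8888 surface; a caller would normally reach the function through the hubbub->funcs table registered further down in this file.

/* Illustrative sketch, not part of the patch; values are hypothetical. */
static bool example_query_dcc_cap(struct hubbub *hubbub)
{
	struct dc_dcc_surface_param input = { 0 };
	struct dc_surface_dcc_cap output;

	input.format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
	input.surface_size.width = 1920;
	input.surface_size.height = 1080;
	input.swizzle_mode = DC_SW_64KB_R_X;
	input.scan = SCAN_DIRECTION_HORIZONTAL;

	if (!hubbub->funcs->get_dcc_compression_cap(hubbub, &input, &output))
		return false;

	return output.capable;
}
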
+
+void hubbub3_force_wm_propagate_to_pipes(struct hubbub *hubbub)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t refclk_mhz = hubbub->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
+ uint32_t prog_wm_value = convert_and_clamp(hubbub1->watermarks.a.urgent_ns,
+ refclk_mhz, 0x1fffff);
+
+ REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value,
+ DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A, prog_wm_value);
+}
+
+static const struct hubbub_funcs hubbub30_funcs = {
+ .update_dchub = hubbub2_update_dchub,
+ .init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx,
+ .init_vm_ctx = hubbub2_init_vm_ctx,
+ .dcc_support_swizzle = hubbub3_dcc_support_swizzle,
+ .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
+ .get_dcc_compression_cap = hubbub3_get_dcc_compression_cap,
+ .wm_read_state = hubbub21_wm_read_state,
+ .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
+ .program_watermarks = hubbub3_program_watermarks,
+ .allow_self_refresh_control = hubbub1_allow_self_refresh_control,
+ .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
+ .force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes,
+};
+
+void hubbub3_construct(struct dcn20_hubbub *hubbub3,
+ struct dc_context *ctx,
+ const struct dcn_hubbub_registers *hubbub_regs,
+ const struct dcn_hubbub_shift *hubbub_shift,
+ const struct dcn_hubbub_mask *hubbub_mask)
+{
+ hubbub3->base.ctx = ctx;
+ hubbub3->base.funcs = &hubbub30_funcs;
+ hubbub3->regs = hubbub_regs;
+ hubbub3->shifts = hubbub_shift;
+ hubbub3->masks = hubbub_mask;
+
+ hubbub3->debug_test_index_pstate = 0xB;
+ hubbub3->detile_buf_size = 184 * 1024; /* 184KB for DCN3 */
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h
new file mode 100644
index 000000000000..790baa00672b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HUBBUB_DCN30_H__
+#define __DC_HUBBUB_DCN30_H__
+
+#include "dcn21/dcn21_hubbub.h"
+
+#define HUBBUB_REG_LIST_DCN3AG(id)\
+ HUBBUB_REG_LIST_DCN21()
+
+#define HUBBUB_MASK_SH_LIST_DCN3AG(mask_sh)\
+ HUBBUB_MASK_SH_LIST_DCN21(mask_sh)
+
+#define HUBBUB_REG_LIST_DCN30(id)\
+ HUBBUB_REG_LIST_DCN20_COMMON(), \
+ HUBBUB_SR_WATERMARK_REG_LIST(), \
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A),\
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B),\
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C),\
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D),\
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A),\
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B),\
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C),\
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D)
+
+#define HUBBUB_MASK_SH_LIST_DCN30(mask_sh)\
+ HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \
+ HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \
+ HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
+ HUBBUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE, mask_sh), \
+ HUBBUB_SF(DCN_VM_FB_LOCATION_TOP, FB_TOP, mask_sh), \
+ HUBBUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET, mask_sh), \
+ HUBBUB_SF(DCN_VM_AGP_BOT, AGP_BOT, mask_sh), \
+ HUBBUB_SF(DCN_VM_AGP_TOP, AGP_TOP, mask_sh), \
+ HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, mask_sh)
+
+void hubbub3_construct(struct dcn20_hubbub *hubbub3,
+ struct dc_context *ctx,
+ const struct dcn_hubbub_registers *hubbub_regs,
+ const struct dcn_hubbub_shift *hubbub_shift,
+ const struct dcn_hubbub_mask *hubbub_mask);
+
+int hubbub3_init_dchub_sys_ctx(struct hubbub *hubbub,
+ struct dcn_hubbub_phys_addr_config *pa_config);
+
+bool hubbub3_dcc_support_swizzle(
+ enum swizzle_mode_values swizzle,
+ unsigned int bytes_per_element,
+ enum segment_order *segment_order_horz,
+ enum segment_order *segment_order_vert);
+
+void hubbub3_force_wm_propagate_to_pipes(struct hubbub *hubbub);
+
+bool hubbub3_get_dcc_compression_cap(struct hubbub *hubbub,
+ const struct dc_dcc_surface_param *input,
+ struct dc_surface_dcc_cap *output);
+
+bool hubbub3_program_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
new file mode 100644
index 000000000000..af462fe4260d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -0,0 +1,532 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn30_hubp.h"
+
+#include "dm_services.h"
+#include "dce_calcs.h"
+#include "reg_helper.h"
+#include "basics/conversion.h"
+#include "dcn20/dcn20_hubp.h"
+#include "dcn21/dcn21_hubp.h"
+
+#define REG(reg)\
+ hubp2->hubp_regs->reg
+
+#define CTX \
+ hubp2->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hubp2->hubp_shift->field_name, hubp2->hubp_mask->field_name
+
+void hubp3_set_vm_system_aperture_settings(struct hubp *hubp,
+ struct vm_system_aperture_param *apt)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ PHYSICAL_ADDRESS_LOC mc_vm_apt_default;
+ PHYSICAL_ADDRESS_LOC mc_vm_apt_low;
+ PHYSICAL_ADDRESS_LOC mc_vm_apt_high;
+
+ // The format of default addr is 48:12 of the 48 bit addr
+ mc_vm_apt_default.quad_part = apt->sys_default.quad_part >> 12;
+
+ // The format of high/low are 48:18 of the 48 bit addr
+ mc_vm_apt_low.quad_part = apt->sys_low.quad_part >> 18;
+ mc_vm_apt_high.quad_part = apt->sys_high.quad_part >> 18;
+
+ REG_SET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR, 0,
+ MC_VM_SYSTEM_APERTURE_LOW_ADDR, mc_vm_apt_low.quad_part);
+
+ REG_SET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, 0,
+ MC_VM_SYSTEM_APERTURE_HIGH_ADDR, mc_vm_apt_high.quad_part);
+
+ REG_SET_2(DCN_VM_MX_L1_TLB_CNTL, 0,
+ ENABLE_L1_TLB, 1,
+ SYSTEM_ACCESS_MODE, 0x3);
+}
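
To make the bit-format comments above concrete (illustrative only, not part of the patch): the default aperture address drops its low 12 bits (4 KiB granularity) while the low/high aperture bounds drop their low 18 bits (256 KiB granularity) before being written. With hypothetical addresses:

/* Illustrative sketch, not part of the patch. */
static void example_aperture_encoding(void)
{
	uint64_t sys_default = 0x123456789000ULL;	/* hypothetical, 4 KiB aligned */
	uint64_t sys_low     = 0x000000040000ULL;	/* hypothetical, 256 KiB aligned */

	uint64_t default_field = sys_default >> 12;	/* -> 0x123456789 */
	uint64_t low_field     = sys_low >> 18;		/* -> 0x1 */

	(void)default_field;
	(void)low_field;
}
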
+
+bool hubp3_program_surface_flip_and_addr(
+ struct hubp *hubp,
+ const struct dc_plane_address *address,
+ bool flip_immediate)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ //program flip type
+ REG_UPDATE(DCSURF_FLIP_CONTROL,
+ SURFACE_FLIP_TYPE, flip_immediate);
+
+ // Program VMID reg
+ if (flip_immediate == 0)
+ REG_UPDATE(VMID_SETTINGS_0,
+ VMID, address->vmid);
+
+ if (address->type == PLN_ADDR_TYPE_GRPH_STEREO) {
+ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x1);
+ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x1);
+
+ } else {
+ // turn off stereo if not in stereo
+ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x0);
+ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x0);
+ }
+
+ /* HW automatically latches the rest of the address registers on a write to
+ * DCSURF_PRIMARY_SURFACE_ADDRESS if SURFACE_UPDATE_LOCK is not used.
+ *
+ * Program the high addr first and then the low addr; order matters!
+ */
+ switch (address->type) {
+ case PLN_ADDR_TYPE_GRAPHICS:
+ /* DCN1.0 does not support const color
+ * TODO: program DCHUBBUB_RET_PATH_DCC_CFGx_0/1
+ * based on address->grph.dcc_const_color
+ * x = 0, 2, 4, 6 for pipe 0, 1, 2, 3 for rgb and luma
+ * x = 1, 3, 5, 7 for pipe 0, 1, 2, 3 for chroma
+ */
+
+ if (address->grph.addr.quad_part == 0)
+ break;
+
+ REG_UPDATE_2(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ, address->tmz_surface);
+
+ if (address->grph.meta_addr.quad_part != 0) {
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ address->grph.meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
+ PRIMARY_META_SURFACE_ADDRESS,
+ address->grph.meta_addr.low_part);
+ }
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH,
+ address->grph.addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
+ PRIMARY_SURFACE_ADDRESS,
+ address->grph.addr.low_part);
+ break;
+ case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
+ if (address->video_progressive.luma_addr.quad_part == 0
+ || address->video_progressive.chroma_addr.quad_part == 0)
+ break;
+
+ REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_SURFACE_TMZ_C, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface);
+
+ if (address->video_progressive.luma_meta_addr.quad_part != 0) {
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH_C,
+ address->video_progressive.chroma_meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_C,
+ address->video_progressive.chroma_meta_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ address->video_progressive.luma_meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
+ PRIMARY_META_SURFACE_ADDRESS,
+ address->video_progressive.luma_meta_addr.low_part);
+ }
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH_C,
+ address->video_progressive.chroma_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0,
+ PRIMARY_SURFACE_ADDRESS_C,
+ address->video_progressive.chroma_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH,
+ address->video_progressive.luma_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
+ PRIMARY_SURFACE_ADDRESS,
+ address->video_progressive.luma_addr.low_part);
+ break;
+ case PLN_ADDR_TYPE_GRPH_STEREO:
+ if (address->grph_stereo.left_addr.quad_part == 0)
+ break;
+ if (address->grph_stereo.right_addr.quad_part == 0)
+ break;
+
+ REG_UPDATE_8(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_SURFACE_TMZ_C, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface,
+ SECONDARY_SURFACE_TMZ, address->tmz_surface,
+ SECONDARY_SURFACE_TMZ_C, address->tmz_surface,
+ SECONDARY_META_SURFACE_TMZ, address->tmz_surface,
+ SECONDARY_META_SURFACE_TMZ_C, address->tmz_surface);
+
+ if (address->grph_stereo.right_meta_addr.quad_part != 0) {
+
+ REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, 0,
+ SECONDARY_META_SURFACE_ADDRESS_HIGH_C,
+ address->grph_stereo.right_alpha_meta_addr.high_part);
+
+ REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, 0,
+ SECONDARY_META_SURFACE_ADDRESS_C,
+ address->grph_stereo.right_alpha_meta_addr.low_part);
+
+ REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, 0,
+ SECONDARY_META_SURFACE_ADDRESS_HIGH,
+ address->grph_stereo.right_meta_addr.high_part);
+
+ REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS, 0,
+ SECONDARY_META_SURFACE_ADDRESS,
+ address->grph_stereo.right_meta_addr.low_part);
+ }
+ if (address->grph_stereo.left_meta_addr.quad_part != 0) {
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH_C,
+ address->grph_stereo.left_alpha_meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_C,
+ address->grph_stereo.left_alpha_meta_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ address->grph_stereo.left_meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
+ PRIMARY_META_SURFACE_ADDRESS,
+ address->grph_stereo.left_meta_addr.low_part);
+ }
+
+ REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, 0,
+ SECONDARY_SURFACE_ADDRESS_HIGH_C,
+ address->grph_stereo.right_alpha_addr.high_part);
+
+ REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS_C, 0,
+ SECONDARY_SURFACE_ADDRESS_C,
+ address->grph_stereo.right_alpha_addr.low_part);
+
+ REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, 0,
+ SECONDARY_SURFACE_ADDRESS_HIGH,
+ address->grph_stereo.right_addr.high_part);
+
+ REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS, 0,
+ SECONDARY_SURFACE_ADDRESS,
+ address->grph_stereo.right_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH_C,
+ address->grph_stereo.left_alpha_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0,
+ PRIMARY_SURFACE_ADDRESS_C,
+ address->grph_stereo.left_alpha_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH,
+ address->grph_stereo.left_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
+ PRIMARY_SURFACE_ADDRESS,
+ address->grph_stereo.left_addr.low_part);
+ break;
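+	/* RGBE + alpha: the base surface uses the PRIMARY registers and the
+	 * separate alpha plane uses the PRIMARY _C registers.
+	 */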
+ case PLN_ADDR_TYPE_RGBEA:
+ if (address->rgbea.addr.quad_part == 0
+ || address->rgbea.alpha_addr.quad_part == 0)
+ break;
+
+ REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_SURFACE_TMZ_C, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface);
+
+ if (address->rgbea.meta_addr.quad_part != 0) {
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH_C,
+ address->rgbea.alpha_meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_C,
+ address->rgbea.alpha_meta_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ address->rgbea.meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
+ PRIMARY_META_SURFACE_ADDRESS,
+ address->rgbea.meta_addr.low_part);
+ }
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH_C,
+ address->rgbea.alpha_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0,
+ PRIMARY_SURFACE_ADDRESS_C,
+ address->rgbea.alpha_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH,
+ address->rgbea.addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
+ PRIMARY_SURFACE_ADDRESS,
+ address->rgbea.addr.low_part);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ hubp->request_address = *address;
+
+ return true;
+}
+
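+/* Program the GFX9-style tiling/swizzle configuration. NUM_PKRS (number of
+ * packers) is new in the DCN3 register layout compared with DCN2.
+ */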
+static void hubp3_program_tiling(
+ struct dcn20_hubp *hubp2,
+ const union dc_tiling_info *info,
+ const enum surface_pixel_format pixel_format)
+{
+ REG_UPDATE_4(DCSURF_ADDR_CONFIG,
+ NUM_PIPES, log_2(info->gfx9.num_pipes),
+ PIPE_INTERLEAVE, info->gfx9.pipe_interleave,
+ MAX_COMPRESSED_FRAGS, log_2(info->gfx9.max_compressed_frags),
+ NUM_PKRS, log_2(info->gfx9.num_pkrs));
+
+ REG_UPDATE_3(DCSURF_TILING_CONFIG,
+ SW_MODE, info->gfx9.swizzle,
+ META_LINEAR, info->gfx9.meta_linear,
+ PIPE_ALIGNED, info->gfx9.pipe_aligned);
+
+}
+
+void hubp3_dcc_control(struct hubp *hubp, bool enable,
+ enum hubp_ind_block_size blk_size)
+{
+ uint32_t dcc_en = enable ? 1 : 0;
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, dcc_en,
+ PRIMARY_SURFACE_DCC_IND_BLK, blk_size,
+ SECONDARY_SURFACE_DCC_EN, dcc_en,
+ SECONDARY_SURFACE_DCC_IND_BLK, blk_size);
+}
+
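+/* Sienna Cichlid DCC control: besides enabling DCC, program the independent
+ * block sizes separately for the luma and chroma planes.
+ */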
+void hubp3_dcc_control_sienna_cichlid(struct hubp *hubp,
+ struct dc_plane_dcc_param *dcc)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+	/* Workaround until UMD fixes the new dcc_ind_blk interface */
+ if (dcc->independent_64b_blks && dcc->dcc_ind_blk == 0)
+ dcc->dcc_ind_blk = 1;
+ if (dcc->independent_64b_blks_c && dcc->dcc_ind_blk_c == 0)
+ dcc->dcc_ind_blk_c = 1;
+
+ REG_UPDATE_6(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, dcc->enable,
+ PRIMARY_SURFACE_DCC_IND_BLK, dcc->dcc_ind_blk,
+ PRIMARY_SURFACE_DCC_IND_BLK_C, dcc->dcc_ind_blk_c,
+ SECONDARY_SURFACE_DCC_EN, dcc->enable,
+ SECONDARY_SURFACE_DCC_IND_BLK, dcc->dcc_ind_blk,
+ SECONDARY_SURFACE_DCC_IND_BLK_C, dcc->dcc_ind_blk_c);
+}
+
+void hubp3_dmdata_set_attributes(
+ struct hubp *hubp,
+ const struct dc_dmdata_attributes *attr)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ /*always HW mode */
+ REG_UPDATE(DMDATA_CNTL,
+ DMDATA_MODE, 1);
+
+ /* for DMDATA flip, need to use SURFACE_UPDATE_LOCK */
+ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_UPDATE_LOCK, 1);
+
+ /* toggle DMDATA_UPDATED and set repeat and size */
+ REG_UPDATE(DMDATA_CNTL,
+ DMDATA_UPDATED, 0);
+ REG_UPDATE_3(DMDATA_CNTL,
+ DMDATA_UPDATED, 1,
+ DMDATA_REPEAT, attr->dmdata_repeat,
+ DMDATA_SIZE, attr->dmdata_size);
+
+ /* set DMDATA address */
+ REG_WRITE(DMDATA_ADDRESS_LOW, attr->address.low_part);
+ REG_UPDATE(DMDATA_ADDRESS_HIGH,
+ DMDATA_ADDRESS_HIGH, attr->address.high_part);
+
+ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_UPDATE_LOCK, 0);
+
+}
+
+
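+/* Full surface programming: DCN3-specific DCC and tiling, with size,
+ * rotation and pixel format handled by the DCN2 helpers.
+ */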
+void hubp3_program_surface_config(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ struct plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror,
+ unsigned int compat_level)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ hubp3_dcc_control_sienna_cichlid(hubp, dcc);
+ hubp3_program_tiling(hubp2, tiling_info, format);
+ hubp2_program_size(hubp, format, plane_size, dcc);
+ hubp2_program_rotation(hubp, rotation, horizontal_mirror);
+ hubp2_program_pixel_format(hubp, format);
+}
+
+static void hubp3_program_deadline(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ hubp2_program_deadline(hubp, dlg_attr, ttu_attr);
+ REG_UPDATE(DCN_DMDATA_VM_CNTL,
+ REFCYC_PER_VM_DMDATA, dlg_attr->refcyc_per_vm_dmdata);
+}
+
+void hubp3_read_state(struct hubp *hubp)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ struct dcn_hubp_state *s = &hubp2->state;
+ struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
+
+ hubp2_read_state_common(hubp);
+
+ REG_GET_7(DCHUBP_REQ_SIZE_CONFIG,
+ CHUNK_SIZE, &rq_regs->rq_regs_l.chunk_size,
+ MIN_CHUNK_SIZE, &rq_regs->rq_regs_l.min_chunk_size,
+ META_CHUNK_SIZE, &rq_regs->rq_regs_l.meta_chunk_size,
+ MIN_META_CHUNK_SIZE, &rq_regs->rq_regs_l.min_meta_chunk_size,
+ DPTE_GROUP_SIZE, &rq_regs->rq_regs_l.dpte_group_size,
+ SWATH_HEIGHT, &rq_regs->rq_regs_l.swath_height,
+ PTE_ROW_HEIGHT_LINEAR, &rq_regs->rq_regs_l.pte_row_height_linear);
+
+ REG_GET_7(DCHUBP_REQ_SIZE_CONFIG_C,
+ CHUNK_SIZE_C, &rq_regs->rq_regs_c.chunk_size,
+ MIN_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_chunk_size,
+ META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.meta_chunk_size,
+ MIN_META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_meta_chunk_size,
+ DPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.dpte_group_size,
+ SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height,
+ PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear);
+
+}
+
+void hubp3_setup(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+{
+	/* OTG is locked when this function is called and the registers are
+	 * double buffered, so disabling the requestors is not needed.
+	 */
+ hubp2_vready_at_or_After_vsync(hubp, pipe_dest);
+ hubp21_program_requestor(hubp, rq_regs);
+ hubp3_program_deadline(hubp, dlg_attr, ttu_attr);
+}
+
+void hubp3_init(struct hubp *hubp)
+{
+ // DEDCN21-133: Inconsistent row starting line for flip between DPTE and Meta
+ // This is a chicken bit to enable the ECO fix.
+
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ //hubp[i].HUBPREQ_DEBUG.HUBPREQ_DEBUG[26] = 1;
+ REG_WRITE(HUBPREQ_DEBUG, 1 << 26);
+}
+
+static struct hubp_funcs dcn30_hubp_funcs = {
+ .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
+ .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
+ .hubp_program_surface_flip_and_addr = hubp3_program_surface_flip_and_addr,
+ .hubp_program_surface_config = hubp3_program_surface_config,
+ .hubp_is_flip_pending = hubp2_is_flip_pending,
+ .hubp_setup = hubp3_setup,
+ .hubp_setup_interdependent = hubp2_setup_interdependent,
+ .hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
+ .set_blank = hubp2_set_blank,
+ .dcc_control = hubp3_dcc_control,
+ .mem_program_viewport = min_set_viewport,
+ .set_cursor_attributes = hubp2_cursor_set_attributes,
+ .set_cursor_position = hubp2_cursor_set_position,
+ .hubp_clk_cntl = hubp2_clk_cntl,
+ .hubp_vtg_sel = hubp2_vtg_sel,
+ .dmdata_set_attributes = hubp3_dmdata_set_attributes,
+ .dmdata_load = hubp2_dmdata_load,
+ .dmdata_status_done = hubp2_dmdata_status_done,
+ .hubp_read_state = hubp3_read_state,
+ .hubp_clear_underflow = hubp2_clear_underflow,
+ .hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
+ .hubp_init = hubp3_init,
+};
+
+bool hubp3_construct(
+ struct dcn20_hubp *hubp2,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn_hubp2_registers *hubp_regs,
+ const struct dcn_hubp2_shift *hubp_shift,
+ const struct dcn_hubp2_mask *hubp_mask)
+{
+ hubp2->base.funcs = &dcn30_hubp_funcs;
+ hubp2->base.ctx = ctx;
+ hubp2->hubp_regs = hubp_regs;
+ hubp2->hubp_shift = hubp_shift;
+ hubp2->hubp_mask = hubp_mask;
+ hubp2->base.inst = inst;
+ hubp2->base.opp_id = OPP_ID_INVALID;
+ hubp2->base.mpcc_id = 0xf;
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
new file mode 100644
index 000000000000..fd1fb3c531d1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
@@ -0,0 +1,292 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HUBP_DCN30_H__
+#define __DC_HUBP_DCN30_H__
+
+#include "dcn20/dcn20_hubp.h"
+#include "dcn21/dcn21_hubp.h"
+
+#define HUBP_REG_LIST_DCN30(id)\
+ HUBP_REG_LIST_DCN21(id),\
+ SRI(DCN_DMDATA_VM_CNTL, HUBPREQ, id)
+
+
+#define HUBP_MASK_SH_LIST_DCN30_BASE(mask_sh)\
+ HUBP_MASK_SH_LIST_DCN21_COMMON(mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ALPHA_PLANE_EN, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, REFCYC_PER_VM_DMDATA, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_FAULT_STATUS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_FAULT_STATUS_CLEAR, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_UNDERFLOW_STATUS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_LATE_STATUS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_UNDERFLOW_STATUS_CLEAR, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_DONE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PKRS, mask_sh)
+
+
+#define HUBP_MASK_SH_LIST_DCN30(mask_sh)\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, REFCYC_PER_VM_DMDATA, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_FAULT_STATUS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_FAULT_STATUS_CLEAR, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_UNDERFLOW_STATUS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_LATE_STATUS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_UNDERFLOW_STATUS_CLEAR, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_DONE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_CLEAR, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PIPES, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, PIPE_INTERLEAVE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, MAX_COMPRESSED_FRAGS, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PKRS, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, SW_MODE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, META_LINEAR, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, PIPE_ALIGNED, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, META_PITCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, PITCH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, META_PITCH_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_TYPE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_PENDING, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_UPDATE_LOCK, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START, PRI_VIEWPORT_X_START, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START, PRI_VIEWPORT_Y_START, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION, SEC_VIEWPORT_WIDTH, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION, SEC_VIEWPORT_HEIGHT, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START, SEC_VIEWPORT_X_START, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START, SEC_VIEWPORT_Y_START, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_WIDTH_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_HEIGHT_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_X_START_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_Y_START_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_WIDTH_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_HEIGHT_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_X_START_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_Y_START_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, PRIMARY_SURFACE_ADDRESS_HIGH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS, PRIMARY_SURFACE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, SECONDARY_SURFACE_ADDRESS_HIGH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS, SECONDARY_SURFACE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, PRIMARY_META_SURFACE_ADDRESS_HIGH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS, PRIMARY_META_SURFACE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, SECONDARY_META_SURFACE_ADDRESS_HIGH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS, SECONDARY_META_SURFACE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, PRIMARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C, PRIMARY_SURFACE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, SECONDARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C, SECONDARY_SURFACE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, PRIMARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, PRIMARY_META_SURFACE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, SECONDARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, SECONDARY_META_SURFACE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE, SURFACE_INUSE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH, SURFACE_INUSE_ADDRESS_HIGH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_C, SURFACE_INUSE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C, SURFACE_INUSE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE, SURFACE_EARLIEST_INUSE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH, SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C, SURFACE_EARLIEST_INUSE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_TMZ, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_TMZ_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_META_SURFACE_TMZ, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_META_SURFACE_TMZ_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_EN, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_IND_BLK, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_IND_BLK_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_TMZ, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_TMZ_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_META_SURFACE_TMZ, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_META_SURFACE_TMZ_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK_C, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_Y_G, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_ALPHA, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, PACK_3TO2_ELEMENT_DISABLE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, DRQ_EXPANSION_MODE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, PRQ_EXPANSION_MODE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, MRQ_EXPANSION_MODE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, CRQ_EXPANSION_MODE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, CHUNK_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_CHUNK_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, META_CHUNK_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_META_CHUNK_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, DPTE_GROUP_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, PTE_ROW_HEIGHT_LINEAR, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, CHUNK_SIZE_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_CHUNK_SIZE_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, META_CHUNK_SIZE_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_META_CHUNK_SIZE_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, DPTE_GROUP_SIZE_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, SWATH_HEIGHT_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, PTE_ROW_HEIGHT_LINEAR_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_BLANK_OFFSET_0, REFCYC_H_BLANK_END, mask_sh),\
+ HUBP_SF(HUBPREQ0_BLANK_OFFSET_0, DLG_V_BLANK_END, mask_sh),\
+ HUBP_SF(HUBPREQ0_BLANK_OFFSET_1, MIN_DST_Y_NEXT_START, mask_sh),\
+ HUBP_SF(HUBPREQ0_DST_DIMENSIONS, REFCYC_PER_HTOTAL, mask_sh),\
+ HUBP_SF(HUBPREQ0_DST_AFTER_SCALER, REFCYC_X_AFTER_SCALER, mask_sh),\
+ HUBP_SF(HUBPREQ0_DST_AFTER_SCALER, DST_Y_AFTER_SCALER, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_0, DST_Y_PER_VM_VBLANK, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_0, DST_Y_PER_ROW_VBLANK, mask_sh),\
+ HUBP_SF(HUBPREQ0_REF_FREQ_TO_PIX_FREQ, REF_FREQ_TO_PIX_FREQ, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_1, REFCYC_PER_PTE_GROUP_VBLANK_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_3, REFCYC_PER_META_CHUNK_VBLANK_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_4, DST_Y_PER_META_ROW_NOM_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_5, REFCYC_PER_META_CHUNK_NOM_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY_PRE, REFCYC_PER_LINE_DELIVERY_PRE_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY_PRE, REFCYC_PER_LINE_DELIVERY_PRE_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY, REFCYC_PER_LINE_DELIVERY_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY, REFCYC_PER_LINE_DELIVERY_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_2, REFCYC_PER_PTE_GROUP_VBLANK_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_4, REFCYC_PER_META_CHUNK_VBLANK_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_6, DST_Y_PER_META_ROW_NOM_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_7, REFCYC_PER_META_CHUNK_NOM_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_TTU_QOS_WM, QoS_LEVEL_LOW_WM, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_TTU_QOS_WM, QoS_LEVEL_HIGH_WM, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_GLOBAL_TTU_CNTL, MIN_TTU_VBLANK, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_GLOBAL_TTU_CNTL, QoS_LEVEL_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_GLOBAL_TTU_CNTL, ROW_TTU_MODE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, REFCYC_PER_REQ_DELIVERY, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_LEVEL_FIXED, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_RAMP_DISABLE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\
+ HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh),\
+ HUBP_MASK_SH_LIST_DCN_VM(mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ALPHA_PLANE_EN, mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS, DST_Y_PREFETCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS, VRATIO_PREFETCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS_C, VRATIO_PREFETCH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR, MC_VM_SYSTEM_APERTURE_LOW_ADDR, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, MC_VM_SYSTEM_APERTURE_HIGH_ADDR, mask_sh),\
+ HUBP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_DST_Y_OFFSET, mask_sh), \
+ HUBP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_CHUNK_HDL_ADJUST, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SIZE, CURSOR_WIDTH, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SIZE, CURSOR_HEIGHT, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_POSITION, CURSOR_X_POSITION, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_ADDRESS_HIGH, DMDATA_ADDRESS_HIGH, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_MODE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_UPDATED, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_REPEAT, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_SIZE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_UPDATED, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_REPEAT, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_SIZE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_QOS_MODE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_QOS_LEVEL, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_DL_DELTA, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_STATUS, DMDATA_DONE, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_0, DST_Y_PER_VM_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_0, DST_Y_PER_ROW_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_1, REFCYC_PER_PTE_GROUP_FLIP_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_2, REFCYC_PER_META_CHUNK_FLIP_L, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE_STOP_DATA_DURING_VM, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, HUBPREQ_MASTER_UPDATE_LOCK_STATUS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL2, SURFACE_GSL_ENABLE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL2, SURFACE_TRIPLE_BUFFER_ENABLE, mask_sh),\
+ HUBP_SF(HUBPREQ0_VMID_SETTINGS_0, VMID, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_3, REFCYC_PER_VM_GROUP_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_4, REFCYC_PER_VM_REQ_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_5, REFCYC_PER_PTE_GROUP_FLIP_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_6, REFCYC_PER_META_CHUNK_FLIP_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_6, REFCYC_PER_VM_REQ_VBLANK, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh)
+
+bool hubp3_construct(
+ struct dcn20_hubp *hubp2,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn_hubp2_registers *hubp_regs,
+ const struct dcn_hubp2_shift *hubp_shift,
+ const struct dcn_hubp2_mask *hubp_mask);
+
+bool hubp3_program_surface_flip_and_addr(
+ struct hubp *hubp,
+ const struct dc_plane_address *address,
+ bool flip_immediate);
+
+void hubp3_program_surface_config(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ struct plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror,
+ unsigned int compat_level);
+
+void hubp3_setup(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+
+void hubp3_dcc_control(struct hubp *hubp, bool enable,
+ enum hubp_ind_block_size blk_size);
+
+void hubp3_dcc_control_sienna_cichlid(struct hubp *hubp,
+ struct dc_plane_dcc_param *dcc);
+
+void hubp3_dmdata_set_attributes(
+ struct hubp *hubp,
+ const struct dc_dmdata_attributes *attr);
+
+void hubp3_read_state(struct hubp *hubp);
+
+void hubp3_init(struct hubp *hubp);
+
+#endif /* __DC_HUBP_DCN30_H__ */
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
new file mode 100644
index 000000000000..a5d750ed569e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -0,0 +1,719 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "core_types.h"
+#include "resource.h"
+#include "dcn30_hwseq.h"
+#include "dccg.h"
+#include "dce/dce_hwseq.h"
+#include "dcn30_mpc.h"
+#include "dcn30_dpp.h"
+#include "dcn10/dcn10_cm_common.h"
+#include "dcn30_cm_common.h"
+#include "clk_mgr.h"
+#include "reg_helper.h"
+#include "abm.h"
+#include "clk_mgr.h"
+#include "hubp.h"
+#include "dchubbub.h"
+#include "timing_generator.h"
+#include "opp.h"
+#include "ipp.h"
+#include "mpc.h"
+#include "mcif_wb.h"
+#include "dc_dmub_srv.h"
+#include "link_hwss.h"
+#include "dpcd_defs.h"
+
+
+
+
+#define DC_LOGGER_INIT(logger)
+
+#define CTX \
+ hws->ctx
+#define REG(reg)\
+ hws->regs->reg
+#define DC_LOGGER \
+ dc->ctx->logger
+
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hws->shifts->field_name, hws->masks->field_name
+
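+/* Program the per-plane blend LUT (gamma applied after blending). Curves
+ * given as distributed points are translated to the HW PWL format first.
+ */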
+bool dcn30_set_blend_lut(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
+{
+ struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
+ bool result = true;
+ struct pwl_params *blend_lut = NULL;
+
+ if (plane_state->blend_tf) {
+ if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
+ blend_lut = &plane_state->blend_tf->pwl;
+ else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
+ cm3_helper_translate_curve_to_hw_format(
+ plane_state->blend_tf, &dpp_base->regamma_params, false);
+ blend_lut = &dpp_base->regamma_params;
+ }
+ }
+ result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut);
+
+ return result;
+}
+
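+/* Program the shaper and 3D LUT through a shared RMU (MPC resource mux).
+ * The RMU index is expected to have been assigned in
+ * acquire_post_bldn_3dlut; here the assignment is only validated and the
+ * LUTs programmed, or the RMU is released when no 3D LUT is in use.
+ */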
+static bool dcn30_set_mpc_shaper_3dlut(
+ struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream)
+{
+ struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
+ int mpcc_id = pipe_ctx->plane_res.hubp->inst;
+ struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
+ bool result = false;
+ int acquired_rmu = 0;
+ int mpcc_id_projected = 0;
+
+ const struct pwl_params *shaper_lut = NULL;
+ //get the shaper lut params
+ if (stream->func_shaper) {
+ if (stream->func_shaper->type == TF_TYPE_HWPWL)
+ shaper_lut = &stream->func_shaper->pwl;
+ else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
+ cm_helper_translate_curve_to_hw_format(
+ stream->func_shaper,
+ &dpp_base->shaper_params, true);
+ shaper_lut = &dpp_base->shaper_params;
+ }
+ }
+
+ if (stream->lut3d_func &&
+ stream->lut3d_func->state.bits.initialized == 1 &&
+ stream->lut3d_func->state.bits.rmu_idx_valid == 1) {
+ if (stream->lut3d_func->state.bits.rmu_mux_num == 0)
+ mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu0_mux;
+ else if (stream->lut3d_func->state.bits.rmu_mux_num == 1)
+ mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu1_mux;
+ else if (stream->lut3d_func->state.bits.rmu_mux_num == 2)
+ mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu2_mux;
+ if (mpcc_id_projected != mpcc_id)
+ BREAK_TO_DEBUGGER();
+		/* find out why the logical layer assigned a different mpcc_id in acquire_post_bldn_3dlut */
+ acquired_rmu = mpc->funcs->acquire_rmu(mpc, mpcc_id,
+ stream->lut3d_func->state.bits.rmu_mux_num);
+ if (acquired_rmu != stream->lut3d_func->state.bits.rmu_mux_num)
+ BREAK_TO_DEBUGGER();
+ result = mpc->funcs->program_3dlut(mpc,
+ &stream->lut3d_func->lut_3d,
+ stream->lut3d_func->state.bits.rmu_mux_num);
+ result = mpc->funcs->program_shaper(mpc, shaper_lut,
+ stream->lut3d_func->state.bits.rmu_mux_num);
+ } else
+ /*loop through the available mux and release the requested mpcc_id*/
+ mpc->funcs->release_rmu(mpc, mpcc_id);
+
+
+ return result;
+}
+
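+/* Input (degamma) path: set a predefined pre-degamma transfer function,
+ * program the GAMCOR LUT, then trigger blend and shaper/3D LUT programming
+ * when the pipe has an OPP attached.
+ */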
+bool dcn30_set_input_transfer_func(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
+ enum dc_transfer_func_predefined tf;
+ bool result = true;
+ struct pwl_params *params = NULL;
+
+ if (dpp_base == NULL || plane_state == NULL)
+ return false;
+
+ tf = TRANSFER_FUNCTION_UNITY;
+
+ if (plane_state->in_transfer_func &&
+ plane_state->in_transfer_func->type == TF_TYPE_PREDEFINED)
+ tf = plane_state->in_transfer_func->tf;
+
+ dpp_base->funcs->dpp_set_pre_degam(dpp_base, tf);
+
+ if (plane_state->in_transfer_func) {
+ if (plane_state->in_transfer_func->type == TF_TYPE_HWPWL)
+ params = &plane_state->in_transfer_func->pwl;
+ else if (plane_state->in_transfer_func->type == TF_TYPE_DISTRIBUTED_POINTS &&
+ cm3_helper_translate_curve_to_hw_format(plane_state->in_transfer_func,
+ &dpp_base->degamma_params, false))
+ params = &dpp_base->degamma_params;
+ }
+
+ result = dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);
+
+ if (pipe_ctx->stream_res.opp && pipe_ctx->stream_res.opp->ctx) {
+ if (dpp_base->funcs->dpp_program_blnd_lut)
+ hws->funcs.set_blend_lut(pipe_ctx, plane_state);
+ if (dpp_base->funcs->dpp_program_shaper_lut &&
+ dpp_base->funcs->dpp_program_3dlut)
+ hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state);
+ }
+
+ return result;
+}
+
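+/* Output path for the top pipe: prefer the RMU shaper/3D LUT and fall back
+ * to MPC output gamma (OGAM) when no 3D LUT is programmed.
+ */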
+bool dcn30_set_output_transfer_func(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream)
+{
+ int mpcc_id = pipe_ctx->plane_res.hubp->inst;
+ struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
+ struct pwl_params *params = NULL;
+ bool ret = false;
+
+ /* program OGAM or 3DLUT only for the top pipe*/
+ if (pipe_ctx->top_pipe == NULL) {
+ /*program rmu shaper and 3dlut in MPC*/
+ ret = dcn30_set_mpc_shaper_3dlut(pipe_ctx, stream);
+ if (ret == false && mpc->funcs->set_output_gamma && stream->out_transfer_func) {
+ if (stream->out_transfer_func->type == TF_TYPE_HWPWL)
+ params = &stream->out_transfer_func->pwl;
+ else if (pipe_ctx->stream->out_transfer_func->type ==
+ TF_TYPE_DISTRIBUTED_POINTS &&
+ cm3_helper_translate_curve_to_hw_format(
+ stream->out_transfer_func,
+ &mpc->blender_params, false))
+ params = &mpc->blender_params;
+ /* there are no ROM LUTs in OUTGAM */
+ if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED)
+ BREAK_TO_DEBUGGER();
+ }
+ }
+
+ mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
+ return ret;
+}
+
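+/* Route the selected MPCC output to the DWB pipe and configure the MCIF_WB
+ * buffers and arbitration for it.
+ */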
+static void dcn30_set_writeback(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+ struct mcif_buf_params *mcif_buf_params;
+
+ ASSERT(wb_info->dwb_pipe_inst < MAX_DWB_PIPES);
+ ASSERT(wb_info->wb_enabled);
+ ASSERT(wb_info->mpcc_inst >= 0);
+ ASSERT(wb_info->mpcc_inst < 4);
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
+ mcif_buf_params = &wb_info->mcif_buf_params;
+
+ /* set DWB MPC mux */
+ dc->res_pool->mpc->funcs->set_dwb_mux(dc->res_pool->mpc,
+ wb_info->dwb_pipe_inst, wb_info->mpcc_inst);
+ /* set MCIF_WB buffer and arbitration configuration */
+ mcif_wb->funcs->config_mcif_buf(mcif_wb, mcif_buf_params, wb_info->dwb_params.dest_height);
+ mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
+}
+
+void dcn30_update_writeback(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context)
+{
+ struct dwbc *dwb;
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\
+ __func__, wb_info->dwb_pipe_inst,\
+ wb_info->mpcc_inst);
+
+ dcn30_set_writeback(dc, wb_info, context);
+
+ /* update DWB */
+ dwb->funcs->update(dwb, &wb_info->dwb_params);
+}
+
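+/* Warm up (pre-fault) the VM pages used by writeback before any DWB is
+ * enabled. Supports both the newer single-region interface and the original
+ * per-DWB, per-buffer warmup.
+ */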
+bool dcn30_mmhubbub_warmup(
+ struct dc *dc,
+ unsigned int num_dwb,
+ struct dc_writeback_info *wb_info)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+ struct mcif_warmup_params warmup_params = {0};
+ unsigned int i, i_buf;
+	/* make sure there is no active DWB enabled */
+ for (i = 0; i < num_dwb; i++) {
+ dwb = dc->res_pool->dwbc[wb_info[i].dwb_pipe_inst];
+ if (dwb->dwb_is_efc_transition || dwb->dwb_is_drc) {
+			/* cannot do warmup while any DWB is enabled */
+ return false;
+ }
+ }
+
+ if (wb_info->mcif_warmup_params.p_vmid == 0)
+ return false;
+
+	/* check whether this is the new interface: warm up one big buffer once */
+ if (wb_info->mcif_warmup_params.start_address.quad_part != 0 &&
+ wb_info->mcif_warmup_params.region_size != 0) {
+ /*mmhubbub is shared, so it does not matter which MCIF*/
+ mcif_wb = dc->res_pool->mcif_wb[0];
+ /*warmup a big chunk of VM buffer at once*/
+ warmup_params.start_address.quad_part = wb_info->mcif_warmup_params.start_address.quad_part;
+ warmup_params.address_increment = wb_info->mcif_warmup_params.region_size;
+ warmup_params.region_size = wb_info->mcif_warmup_params.region_size;
+ warmup_params.p_vmid = wb_info->mcif_warmup_params.p_vmid;
+
+ if (warmup_params.address_increment == 0)
+ warmup_params.address_increment = dc->dml.soc.vmm_page_size_bytes;
+
+ mcif_wb->funcs->warmup_mcif(mcif_wb, &warmup_params);
+ return true;
+ }
+	/* otherwise use the original interface: warm up each DWB's MCIF buffer */
+ for (i = 0; i < num_dwb; i++) {
+ dwb = dc->res_pool->dwbc[wb_info[i].dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[wb_info[i].dwb_pipe_inst];
+ /*warmup is for VM mode only*/
+ if (wb_info[i].mcif_buf_params.p_vmid == 0)
+ return false;
+
+ /* Warmup MCIF_WB */
+ for (i_buf = 0; i_buf < MCIF_BUF_COUNT; i_buf++) {
+ warmup_params.start_address.quad_part = wb_info[i].mcif_buf_params.luma_address[i_buf];
+ warmup_params.address_increment = dc->dml.soc.vmm_page_size_bytes;
+ warmup_params.region_size = wb_info[i].mcif_buf_params.luma_pitch * wb_info[i].dwb_params.dest_height;
+ warmup_params.p_vmid = wb_info[i].mcif_buf_params.p_vmid;
+ mcif_wb->funcs->warmup_mcif(mcif_wb, &warmup_params);
+ }
+ }
+ return true;
+}
+
+void dcn30_enable_writeback(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+ struct timing_generator *optc;
+
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
+
+ /* set the OPTC source mux */
+ optc = dc->res_pool->timing_generators[dwb->otg_inst];
+ DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\
+ __func__, wb_info->dwb_pipe_inst,\
+ wb_info->mpcc_inst);
+ if (IS_DIAG_DC(dc->ctx->dce_environment)) {
+		/* until diags switches to the warmup interface */
+ dcn30_mmhubbub_warmup(dc, 1, wb_info);
+ }
+ /* Update writeback pipe */
+ dcn30_set_writeback(dc, wb_info, context);
+
+ /* Enable MCIF_WB */
+ mcif_wb->funcs->enable_mcif(mcif_wb);
+ /* Enable DWB */
+ dwb->funcs->enable(dwb, &wb_info->dwb_params);
+}
+
+void dcn30_disable_writeback(
+ struct dc *dc,
+ unsigned int dwb_pipe_inst)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+
+ ASSERT(dwb_pipe_inst < MAX_DWB_PIPES);
+ dwb = dc->res_pool->dwbc[dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[dwb_pipe_inst];
+ DC_LOG_DWB("%s dwb_pipe_inst = %d",\
+ __func__, dwb_pipe_inst);
+
+ /* disable DWB */
+ dwb->funcs->disable(dwb);
+ /* disable MCIF */
+ mcif_wb->funcs->disable_mcif(mcif_wb);
+ /* disable MPC DWB mux */
+ dc->res_pool->mpc->funcs->disable_dwb_mux(dc->res_pool->mpc, dwb_pipe_inst);
+}
+
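+/* Walk the stream's writeback_info entries and enable, update or disable
+ * each writeback pipe so the hardware matches the requested state.
+ */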
+void dcn30_program_all_writeback_pipes_in_tree(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ struct dc_state *context)
+{
+ struct dc_writeback_info wb_info;
+ struct dwbc *dwb;
+ struct dc_stream_status *stream_status = NULL;
+ int i_wb, i_pipe, i_stream;
+ DC_LOG_DWB("%s", __func__);
+
+ ASSERT(stream);
+ for (i_stream = 0; i_stream < context->stream_count; i_stream++) {
+ if (context->streams[i_stream] == stream) {
+ stream_status = &context->stream_status[i_stream];
+ break;
+ }
+ }
+ ASSERT(stream_status);
+
+ ASSERT(stream->num_wb_info <= dc->res_pool->res_cap->num_dwb);
+ /* For each writeback pipe */
+ for (i_wb = 0; i_wb < stream->num_wb_info; i_wb++) {
+
+ /* copy writeback info to local non-const so mpcc_inst can be set */
+ wb_info = stream->writeback_info[i_wb];
+ if (wb_info.wb_enabled) {
+
+ /* get the MPCC instance for writeback_source_plane */
+ wb_info.mpcc_inst = -1;
+ for (i_pipe = 0; i_pipe < dc->res_pool->pipe_count; i_pipe++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i_pipe];
+
+ if (pipe_ctx->plane_state == wb_info.writeback_source_plane) {
+ wb_info.mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
+ break;
+ }
+ }
+ ASSERT(wb_info.mpcc_inst != -1);
+
+ ASSERT(wb_info.dwb_pipe_inst < dc->res_pool->res_cap->num_dwb);
+ dwb = dc->res_pool->dwbc[wb_info.dwb_pipe_inst];
+ if (dwb->funcs->is_enabled(dwb)) {
+ /* writeback pipe already enabled, only need to update */
+ dc->hwss.update_writeback(dc, &wb_info, context);
+ } else {
+ /* Enable writeback pipe and connect to MPCC */
+ dc->hwss.enable_writeback(dc, &wb_info, context);
+ }
+ } else {
+ /* Disable writeback pipe and disconnect from MPCC */
+ dc->hwss.disable_writeback(dc, wb_info.dwb_pipe_inst);
+ }
+ }
+}
+
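+/* One-time hardware init: clocks, DCCG, link encoders, DSC power gating,
+ * optional power-down of displays lit by VBIOS, audio, panel and ABM init,
+ * and clock gating.
+ */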
+void dcn30_init_hw(struct dc *dc)
+{
+ int i, j;
+ struct abm **abms = dc->res_pool->multiple_abms;
+ struct dce_hwseq *hws = dc->hwseq;
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+ struct resource_pool *res_pool = dc->res_pool;
+ uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+
+ if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
+ dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
+
+ // Initialize the dccg
+ if (res_pool->dccg->funcs->dccg_init)
+ res_pool->dccg->funcs->dccg_init(res_pool->dccg);
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+
+ REG_WRITE(REFCLK_CNTL, 0);
+ REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
+ REG_WRITE(DIO_MEM_PWR_CTRL, 0);
+
+ if (!dc->debug.disable_clock_gate) {
+ /* enable all DCN clock gating */
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
+
+ REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
+ }
+
+ //Enable ability to power gate / don't force power on permanently
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(hws, true);
+
+ return;
+ }
+
+ if (!dcb->funcs->is_accelerated_mode(dcb)) {
+ hws->funcs.bios_golden_init(dc);
+ hws->funcs.disable_vga(dc->hwseq);
+ }
+
+ if (dc->ctx->dc_bios->fw_info_valid) {
+ res_pool->ref_clocks.xtalin_clock_inKhz =
+ dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ if (res_pool->dccg && res_pool->hubbub) {
+
+ (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
+ dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
+ &res_pool->ref_clocks.dccg_ref_clock_inKhz);
+
+ (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
+ res_pool->ref_clocks.dccg_ref_clock_inKhz,
+ &res_pool->ref_clocks.dchub_ref_clock_inKhz);
+ } else {
+ // Not all ASICs have DCCG sw component
+ res_pool->ref_clocks.dccg_ref_clock_inKhz =
+ res_pool->ref_clocks.xtalin_clock_inKhz;
+ res_pool->ref_clocks.dchub_ref_clock_inKhz =
+ res_pool->ref_clocks.xtalin_clock_inKhz;
+ }
+ }
+ } else
+ ASSERT_CRITICAL(false);
+
+ for (i = 0; i < dc->link_count; i++) {
+ /* Power up AND update implementation according to the
+ * required signal (which may be different from the
+ * default signal on connector).
+ */
+ struct dc_link *link = dc->links[i];
+
+ link->link_enc->funcs->hw_init(link->link_enc);
+
+ /* Check for enabled DIG to identify enabled display */
+ if (link->link_enc->funcs->is_dig_enabled &&
+ link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ link->link_status.link_active = true;
+ }
+
+ /* Power gate DSCs */
+ for (i = 0; i < res_pool->res_cap->num_dsc; i++)
+ if (hws->funcs.dsc_pg_control != NULL)
+ hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
+
+ /* we want to turn off all dp displays before doing detection */
+ if (dc->config.power_down_display_on_boot) {
+ uint8_t dpcd_power_state = '\0';
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
+ continue;
+
+ /* if any of the displays are lit up turn them off */
+ status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+ if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
+ /* blank dp stream before power off receiver*/
+ if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
+ unsigned int fe;
+
+ fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
+ dc->links[i]->link_enc);
+
+ for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+ if (fe == dc->res_pool->stream_enc[j]->id) {
+ dc->res_pool->stream_enc[j]->funcs->dp_blank(
+ dc->res_pool->stream_enc[j]);
+ break;
+ }
+ }
+ }
+ dp_receiver_power_ctrl(dc->links[i], false);
+ }
+ }
+ }
+
+	/* If taking over control from VBIOS, we may want to optimize our first
+ * mode set, so we need to skip powering down pipes until we know which
+ * pipes we want to use.
+ * Otherwise, if taking control is not possible, we need to power
+ * everything down.
+ */
+ if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
+ hws->funcs.init_pipes(dc, dc->current_state);
+ if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
+ dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
+ !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
+ }
+
+ /* In headless boot cases, DIG may be turned
+ * on which causes HW/SW discrepancies.
+	 * To avoid this, power down the hardware on boot
+	 * if DIG is turned on and seamless boot is not enabled
+ */
+ if (dc->config.power_down_display_on_boot) {
+ struct dc_link *edp_link = get_edp_link(dc);
+
+ if (edp_link &&
+ edp_link->link_enc->funcs->is_dig_enabled &&
+ edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
+ dc->hwss.edp_backlight_control &&
+ dc->hwss.power_down &&
+ dc->hwss.edp_power_control) {
+ dc->hwss.edp_backlight_control(edp_link, false);
+ dc->hwss.power_down(dc);
+ dc->hwss.edp_power_control(edp_link, false);
+ } else {
+ for (i = 0; i < dc->link_count; i++) {
+ struct dc_link *link = dc->links[i];
+
+ if (link->link_enc->funcs->is_dig_enabled &&
+ link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
+ dc->hwss.power_down) {
+ dc->hwss.power_down(dc);
+ break;
+ }
+
+ }
+ }
+ }
+
+ for (i = 0; i < res_pool->audio_count; i++) {
+ struct audio *audio = res_pool->audios[i];
+
+ audio->funcs->hw_init(audio);
+ }
+
+ for (i = 0; i < dc->link_count; i++) {
+ struct dc_link *link = dc->links[i];
+
+ if (link->panel_cntl)
+ backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (abms[i] != NULL)
+ abms[i]->funcs->abm_init(abms[i], backlight);
+ }
+
+	/* Power AFMT HDMI memory. TODO: may move to output enable/disable to save power */
+ REG_WRITE(DIO_MEM_PWR_CTRL, 0);
+
+ if (!dc->debug.disable_clock_gate) {
+ /* enable all DCN clock gating */
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
+
+ REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
+ }
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+
+ if (dc->clk_mgr->funcs->notify_wm_ranges)
+ dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
+
+ if (dc->clk_mgr->funcs->set_hard_max_memclk)
+ dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
+}
+
+void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ if (pipe_ctx == NULL)
+ return;
+
+ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL)
+ pipe_ctx->stream_res.stream_enc->funcs->set_avmute(
+ pipe_ctx->stream_res.stream_enc,
+ enable);
+}
+
+void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
+{
+ bool is_hdmi_tmds;
+ bool is_dp;
+
+ ASSERT(pipe_ctx->stream);
+
+ if (pipe_ctx->stream_res.stream_enc == NULL)
+ return; /* this is not root pipe */
+
+ is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal);
+ is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);
+
+	if (!is_hdmi_tmds && !is_dp)
+		return;
+
+ if (is_hdmi_tmds)
+ pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
+ pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+ else
+ pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
+ pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+}
+
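+/* Enable HW-generated dynamic metadata when the stream carries a dmdata
+ * address; in that case the generic HDR SMD infopacket is suppressed.
+ */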
+void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ bool enable = false;
+ struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ enum dynamic_metadata_mode mode = dc_is_dp_signal(stream->signal)
+ ? dmdata_dp
+ : dmdata_hdmi;
+
+ /* if using dynamic meta, don't set up generic infopackets */
+ if (pipe_ctx->stream->dmdata_address.quad_part != 0) {
+ pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false;
+ enable = true;
+ }
+
+ if (!hubp)
+ return;
+
+ if (!stream_enc || !stream_enc->funcs->set_dynamic_metadata)
+ return;
+
+ stream_enc->funcs->set_dynamic_metadata(stream_enc, enable,
+ hubp->inst, mode);
+}
+
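+/* Idle power optimization entry point. The enable path is only evaluated for
+ * a single-display, single-plane, non-VM configuration with an 8- or 16-bit
+ * surface format; the disable path always reports success.
+ */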
+bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
+{
+ unsigned int surface_size;
+
+ if (!dc->ctx->dmub_srv)
+ return false;
+
+ if (enable) {
+ if (dc->current_state
+ && dc->current_state->stream_count == 1 // single display only
+ && dc->current_state->stream_status[0].plane_count == 1 // single surface only
+ && dc->current_state->stream_status[0].plane_states[0]->address.page_table_base.quad_part == 0 // no VM
+ // Only 8 and 16 bit formats
+ && dc->current_state->stream_status[0].plane_states[0]->format <= SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F
+ && dc->current_state->stream_status[0].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888) {
+
+ surface_size = dc->current_state->stream_status[0].plane_states[0]->plane_size.surface_pitch *
+ dc->current_state->stream_status[0].plane_states[0]->plane_size.surface_size.height *
+ (dc->current_state->stream_status[0].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
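+			/* surface_size is computed but not consumed in this
+			 * patch; the size-based gating for the optimization is
+			 * presumably filled in by a follow-up change.
+			 */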
+
+ }
+
+ return false;
+ }
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
new file mode 100644
index 000000000000..a4989f5ac4e9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCN30_H__
+#define __DC_HWSS_DCN30_H__
+
+#include "hw_sequencer_private.h"
+
+struct dc;
+
+void dcn30_init_hw(struct dc *dc);
+void dcn30_program_all_writeback_pipes_in_tree(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ struct dc_state *context);
+void dcn30_update_writeback(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context);
+void dcn30_enable_writeback(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context);
+void dcn30_disable_writeback(
+ struct dc *dc,
+ unsigned int dwb_pipe_inst);
+
+bool dcn30_mmhubbub_warmup(
+ struct dc *dc,
+ unsigned int num_dwb,
+ struct dc_writeback_info *wb_info);
+
+bool dcn30_set_blend_lut(struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state);
+
+bool dcn30_set_input_transfer_func(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state);
+bool dcn30_set_output_transfer_func(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream);
+void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
+void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx);
+void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx);
+
+bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable);
+
+#endif /* __DC_HWSS_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
new file mode 100644
index 000000000000..1b354c219d0a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "dcn30_hwseq.h"
+
+static const struct hw_sequencer_funcs dcn30_funcs = {
+ .program_gamut_remap = dcn10_program_gamut_remap,
+ .init_hw = dcn30_init_hw,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = NULL,
+ .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .update_dchub = dcn10_update_dchub,
+ .update_pending_status = dcn10_update_pending_status,
+ .program_output_csc = dcn20_program_output_csc,
+ .enable_accelerated_mode = dce110_enable_accelerated_mode,
+ .enable_timing_synchronization = dcn10_enable_timing_synchronization,
+ .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
+ .update_info_frame = dcn30_update_info_frame,
+ .send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
+ .enable_stream = dcn20_enable_stream,
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dcn20_unblank_stream,
+ .blank_stream = dce110_blank_stream,
+ .enable_audio_stream = dce110_enable_audio_stream,
+ .disable_audio_stream = dce110_disable_audio_stream,
+ .disable_plane = dcn20_disable_plane,
+ .pipe_control_lock = dcn20_pipe_control_lock,
+ .interdependent_update_lock = dcn10_lock_all_pipes,
+ .cursor_lock = dcn10_cursor_lock,
+ .prepare_bandwidth = dcn20_prepare_bandwidth,
+ .optimize_bandwidth = dcn20_optimize_bandwidth,
+ .update_bandwidth = dcn20_update_bandwidth,
+ .set_drr = dcn10_set_drr,
+ .get_position = dcn10_get_position,
+ .set_static_screen_control = dcn10_set_static_screen_control,
+ .setup_stereo = dcn10_setup_stereo,
+ .set_avmute = dcn30_set_avmute,
+ .log_hw_state = dcn10_log_hw_state,
+ .get_hw_state = dcn10_get_hw_state,
+ .clear_status_bits = dcn10_clear_status_bits,
+ .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_power_control = dce110_edp_power_control,
+ .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+ .set_cursor_position = dcn10_set_cursor_position,
+ .set_cursor_attribute = dcn10_set_cursor_attribute,
+ .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+ .set_clock = dcn10_set_clock,
+ .get_clock = dcn10_get_clock,
+ .program_triplebuffer = dcn20_program_triple_buffer,
+ .enable_writeback = dcn30_enable_writeback,
+ .disable_writeback = dcn30_disable_writeback,
+ .update_writeback = dcn30_update_writeback,
+ .mmhubbub_warmup = dcn30_mmhubbub_warmup,
+ .dmdata_status_done = dcn20_dmdata_status_done,
+ .program_dmdata_engine = dcn30_program_dmdata_engine,
+ .set_dmdata_attributes = dcn20_set_dmdata_attributes,
+ .init_sys_ctx = dcn20_init_sys_ctx,
+ .init_vm_ctx = dcn20_init_vm_ctx,
+ .set_flip_control_gsl = dcn20_set_flip_control_gsl,
+ .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+ .apply_idle_power_optimizations = dcn30_apply_idle_power_optimizations,
+ .set_backlight_level = dce110_set_backlight_level,
+ .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
+};
+
+static const struct hwseq_private_funcs dcn30_private_funcs = {
+ .init_pipes = dcn10_init_pipes,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+ .update_mpcc = dcn20_update_mpcc,
+ .set_input_transfer_func = dcn30_set_input_transfer_func,
+ .set_output_transfer_func = dcn30_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_display_power_gating = dcn10_dummy_display_power_gating,
+ .blank_pixel_data = dcn20_blank_pixel_data,
+ .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
+ .enable_stream_timing = dcn20_enable_stream_timing,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .disable_stream_gating = dcn20_disable_stream_gating,
+ .enable_stream_gating = dcn20_enable_stream_gating,
+ .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+ .did_underflow_occur = dcn10_did_underflow_occur,
+ .init_blank = dcn20_init_blank,
+ .disable_vga = dcn20_disable_vga,
+ .bios_golden_init = dcn10_bios_golden_init,
+ .plane_atomic_disable = dcn20_plane_atomic_disable,
+ .plane_atomic_power_down = dcn10_plane_atomic_power_down,
+ .enable_power_gating_plane = dcn20_enable_power_gating_plane,
+ .dpp_pg_control = dcn20_dpp_pg_control,
+ .hubp_pg_control = dcn20_hubp_pg_control,
+ .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
+ .update_odm = dcn20_update_odm,
+ .dsc_pg_control = dcn20_dsc_pg_control,
+ .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
+ .get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color,
+ .set_hdr_multiplier = dcn10_set_hdr_multiplier,
+ .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+ .wait_for_blank_complete = dcn20_wait_for_blank_complete,
+ .dccg_init = dcn20_dccg_init,
+ .set_blend_lut = dcn30_set_blend_lut,
+ .set_shaper_3dlut = dcn20_set_shaper_3dlut,
+};
+
+void dcn30_hw_sequencer_construct(struct dc *dc)
+{
+ dc->hwss = dcn30_funcs;
+ dc->hwseq->funcs = dcn30_private_funcs;
+
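+	/* FPGA (Maximus) emulation uses the simplified DCN20 FPGA init sequence and skips pipe init */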
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ dc->hwss.init_hw = dcn20_fpga_init_hw;
+ dc->hwseq->funcs.init_pipes = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h
new file mode 100644
index 000000000000..c280ff90bfa3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DCN30_INIT_H__
+#define __DC_DCN30_INIT_H__
+
+struct dc;
+
+void dcn30_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_DCN30_INIT_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c
new file mode 100644
index 000000000000..1c4b171c68ad
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "reg_helper.h"
+#include "resource.h"
+#include "mcif_wb.h"
+#include "dcn30_mmhubbub.h"
+
+
+#define REG(reg)\
+ mcif_wb30->mcif_wb_regs->reg
+
+#define CTX \
+ mcif_wb30->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ mcif_wb30->mcif_wb_shift->field_name, mcif_wb30->mcif_wb_mask->field_name
+
+#define MCIF_ADDR(addr) (((unsigned long long)addr & 0xffffffffff) + 0xFE) >> 8
+#define MCIF_ADDR_HIGH(addr) (unsigned long long)addr >> 40
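+/* Addresses handed to MCIF are programmed in 256-byte units: MCIF_ADDR yields
+ * bits [39:8] of the buffer address (the +0xFE biases the value upward before
+ * the shift), while MCIF_ADDR_HIGH yields the address bits above bit 39.
+ */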
+
+/* wbif programming guide:
+ * 1. set up wbif parameters:
+ * unsigned long long luma_address[4]; //4 frame buffer
+ * unsigned long long chroma_address[4];
+ * unsigned int luma_pitch;
+ * unsigned int chroma_pitch;
+ * unsigned int warmup_pitch=0x10; //256B align, the page size is 4KB when it is 0x10
+ * unsigned int slice_lines; //slice size
+ * unsigned int time_per_pixel; // time per pixel, in ns
+ * unsigned int arbitration_slice; // 0: 2048 bytes 1: 4096 bytes 2: 8192 Bytes
+ * unsigned int max_scaled_time; // used for QOS generation
+ * unsigned int swlock=0x0;
+ * unsigned int cli_watermark[4]; //4 group urgent watermark
+ * unsigned int pstate_watermark[4]; //4 group pstate watermark
+ * unsigned int sw_int_en; // Software interrupt enable, frame end and overflow
+ * unsigned int sw_slice_int_en; // slice end interrupt enable
+ * unsigned int sw_overrun_int_en; // overrun error interrupt enable
+ * unsigned int vce_int_en; // VCE interrupt enable, frame end and overflow
+ * unsigned int vce_slice_int_en; // VCE slice end interrupt enable, frame end and overflow
+ *
+ * 2. configure wbif register
+ * a. call mmhubbub_config_wbif()
+ *
+ * 3. Enable wbif
+ * call set_wbif_bufmgr_enable();
+ *
+ * 4. wbif_dump_status(), optional, for debug purposes
+ *    the bufmgr status shows the progress of the write back and can be used for debugging
+ */
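+
+/* Illustrative sketch only: the steps above map roughly onto the mcif_wb_funcs
+ * hooks defined at the bottom of this file (variable names here are made up):
+ *   mcif_wb->funcs->config_mcif_buf(mcif_wb, &buf_params, dest_height); // step 2
+ *   mcif_wb->funcs->config_mcif_arb(mcif_wb, &arb_params);              // step 2
+ *   mcif_wb->funcs->enable_mcif(mcif_wb);                               // step 3
+ */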
+
+static void mmhubbub3_warmup_mcif(struct mcif_wb *mcif_wb,
+ struct mcif_warmup_params *params)
+{
+ struct dcn30_mmhubbub *mcif_wb30 = TO_DCN30_MMHUBBUB(mcif_wb);
+ union large_integer start_address_shift = {.quad_part = params->start_address.quad_part >> 5};
+
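+	/* warmup addresses and sizes are programmed in 32-byte units, hence the >> 5 shifts below */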
+ /* Set base address and region size for warmup */
+ REG_SET(MMHUBBUB_WARMUP_BASE_ADDR_HIGH, 0, MMHUBBUB_WARMUP_BASE_ADDR_HIGH, start_address_shift.high_part);
+ REG_SET(MMHUBBUB_WARMUP_BASE_ADDR_LOW, 0, MMHUBBUB_WARMUP_BASE_ADDR_LOW, start_address_shift.low_part);
+ REG_SET(MMHUBBUB_WARMUP_ADDR_REGION, 0, MMHUBBUB_WARMUP_ADDR_REGION, params->region_size >> 5);
+// REG_SET(MMHUBBUB_WARMUP_P_VMID, 0, MMHUBBUB_WARMUP_P_VMID, params->p_vmid);
+
+ /* Set address increment and enable warmup */
+ REG_SET_3(MMHUBBUB_WARMUP_CONTROL_STATUS, 0, MMHUBBUB_WARMUP_EN, true,
+ MMHUBBUB_WARMUP_SW_INT_EN, true,
+ MMHUBBUB_WARMUP_INC_ADDR, params->address_increment >> 5);
+
+ /* Wait for an interrupt to signal warmup is completed */
+ REG_WAIT(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_SW_INT_STATUS, 1, 20, 100);
+
+ /* Acknowledge interrupt */
+ REG_UPDATE(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_SW_INT_ACK, 1);
+
+ /* Disable warmup */
+ REG_UPDATE(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_EN, false);
+}
+
+void mmhubbub3_config_mcif_buf(struct mcif_wb *mcif_wb,
+ struct mcif_buf_params *params,
+ unsigned int dest_height)
+{
+ struct dcn30_mmhubbub *mcif_wb30 = TO_DCN30_MMHUBBUB(mcif_wb);
+
+ /* buffer address for packing mode or Luma in planar mode */
+ REG_UPDATE(MCIF_WB_BUF_1_ADDR_Y, MCIF_WB_BUF_1_ADDR_Y, MCIF_ADDR(params->luma_address[0]));
+ REG_UPDATE(MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[0]));
+
+ /* buffer address for Chroma in planar mode (unused in packing mode) */
+ REG_UPDATE(MCIF_WB_BUF_1_ADDR_C, MCIF_WB_BUF_1_ADDR_C, MCIF_ADDR(params->chroma_address[0]));
+ REG_UPDATE(MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[0]));
+
+ /* buffer address for packing mode or Luma in planar mode */
+ REG_UPDATE(MCIF_WB_BUF_2_ADDR_Y, MCIF_WB_BUF_2_ADDR_Y, MCIF_ADDR(params->luma_address[1]));
+ REG_UPDATE(MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[1]));
+
+ /* buffer address for Chroma in planar mode (unused in packing mode) */
+ REG_UPDATE(MCIF_WB_BUF_2_ADDR_C, MCIF_WB_BUF_2_ADDR_C, MCIF_ADDR(params->chroma_address[1]));
+ REG_UPDATE(MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[1]));
+
+ /* buffer address for packing mode or Luma in planar mode */
+ REG_UPDATE(MCIF_WB_BUF_3_ADDR_Y, MCIF_WB_BUF_3_ADDR_Y, MCIF_ADDR(params->luma_address[2]));
+ REG_UPDATE(MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[2]));
+
+ /* buffer address for Chroma in planar mode (unused in packing mode) */
+ REG_UPDATE(MCIF_WB_BUF_3_ADDR_C, MCIF_WB_BUF_3_ADDR_C, MCIF_ADDR(params->chroma_address[2]));
+ REG_UPDATE(MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[2]));
+
+ /* buffer address for packing mode or Luma in planar mode */
+ REG_UPDATE(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB_BUF_4_ADDR_Y, MCIF_ADDR(params->luma_address[3]));
+ REG_UPDATE(MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_ADDR_HIGH(params->luma_address[3]));
+
+ /* buffer address for Chroma in planar mode (unused in packing mode) */
+ REG_UPDATE(MCIF_WB_BUF_4_ADDR_C, MCIF_WB_BUF_4_ADDR_C, MCIF_ADDR(params->chroma_address[3]));
+ REG_UPDATE(MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_ADDR_HIGH(params->chroma_address[3]));
+
+ /* setup luma & chroma size
+	 * must be large enough to contain a whole frame of Luma data,
+ * the programmed value is frame buffer size [27:8], 256-byte aligned
+ */
+ REG_UPDATE(MCIF_WB_BUF_LUMA_SIZE, MCIF_WB_BUF_LUMA_SIZE, (params->luma_pitch>>8) * dest_height);
+ REG_UPDATE(MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB_BUF_CHROMA_SIZE, (params->chroma_pitch>>8) * dest_height);
+
+ /* enable address fence */
+ REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUF_ADDR_FENCE_EN, 1);
+
+	/* setup pitch, the programmed value is pitch bits [15:8], 256-byte aligned */
+ REG_UPDATE_2(MCIF_WB_BUF_PITCH, MCIF_WB_BUF_LUMA_PITCH, params->luma_pitch >> 8,
+ MCIF_WB_BUF_CHROMA_PITCH, params->chroma_pitch >> 8);
+}
+
+static void mmhubbub3_config_mcif_arb(struct mcif_wb *mcif_wb,
+ struct mcif_arb_params *params)
+{
+ struct dcn30_mmhubbub *mcif_wb30 = TO_DCN30_MMHUBBUB(mcif_wb);
+
+ /* Programmed by the video driver based on the CRTC timing (for DWB) */
+ REG_UPDATE(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_TIME_PER_PIXEL, params->time_per_pixel);
+
+	/* Programming dwb watermarks */
+	/* Watermark to generate urgent in MCIF_WB_CLI; MCIF_WB_CLI_WATERMARK_MASK selects which watermark set (A-D) is programmed. */
+	/* Programmed in ns; the formula to calculate the value is provided in the pseudocode. */
+ REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK_MASK, 0x0);
+ /* urgent_watermarkA */
+ REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[0]);
+ REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK_MASK, 0x1);
+ /* urgent_watermarkB */
+ REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[1]);
+ REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK_MASK, 0x2);
+ /* urgent_watermarkC */
+ REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[2]);
+ REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK_MASK, 0x3);
+ /* urgent_watermarkD */
+ REG_UPDATE(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, params->cli_watermark[3]);
+
+ /* Programming nb pstate watermark */
+ /* nbp_state_change_watermarkA */
+ REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x0);
+ REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK,
+ NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[0]);
+ /* nbp_state_change_watermarkB */
+ REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x1);
+ REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK,
+ NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[1]);
+ /* nbp_state_change_watermarkC */
+ REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x2);
+ REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK,
+ NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[2]);
+ /* nbp_state_change_watermarkD */
+ REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, 0x3);
+ REG_UPDATE(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK,
+ NB_PSTATE_CHANGE_REFRESH_WATERMARK, params->pstate_watermark[3]);
+
+ /* dram_speed_change_duration */
+ REG_UPDATE(MCIF_WB_DRAM_SPEED_CHANGE_DURATION_VBI,
+ MCIF_WB_DRAM_SPEED_CHANGE_DURATION_VBI, params->dram_speed_change_duration);
+
+ /* max_scaled_time */
+ REG_UPDATE(MULTI_LEVEL_QOS_CTRL, MAX_SCALED_TIME_TO_URGENT, params->max_scaled_time);
+
+ /* slice_lines */
+ REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_SLICE_SIZE, params->slice_lines-1);
+
+ /* Set arbitration unit for Luma/Chroma */
+	/* arb_unit=2 should be chosen for better efficiency */
+ /* Arbitration size, 0: 2048 bytes 1: 4096 bytes 2: 8192 Bytes */
+ REG_UPDATE(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_CLIENT_ARBITRATION_SLICE, params->arbitration_slice);
+}
+
+const struct mcif_wb_funcs dcn30_mmhubbub_funcs = {
+ .warmup_mcif = mmhubbub3_warmup_mcif,
+ .enable_mcif = mmhubbub2_enable_mcif,
+ .disable_mcif = mmhubbub2_disable_mcif,
+ .config_mcif_buf = mmhubbub3_config_mcif_buf,
+ .config_mcif_arb = mmhubbub3_config_mcif_arb,
+ .config_mcif_irq = mmhubbub2_config_mcif_irq,
+ .dump_frame = mcifwb2_dump_frame,
+};
+
+void dcn30_mmhubbub_construct(struct dcn30_mmhubbub *mcif_wb30,
+ struct dc_context *ctx,
+ const struct dcn30_mmhubbub_registers *mcif_wb_regs,
+ const struct dcn30_mmhubbub_shift *mcif_wb_shift,
+ const struct dcn30_mmhubbub_mask *mcif_wb_mask,
+ int inst)
+{
+ mcif_wb30->base.ctx = ctx;
+
+ mcif_wb30->base.inst = inst;
+ mcif_wb30->base.funcs = &dcn30_mmhubbub_funcs;
+
+ mcif_wb30->mcif_wb_regs = mcif_wb_regs;
+ mcif_wb30->mcif_wb_shift = mcif_wb_shift;
+ mcif_wb30->mcif_wb_mask = mcif_wb_mask;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h
new file mode 100644
index 000000000000..f2580e65196c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h
@@ -0,0 +1,463 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_MCIF_WB_DCN30_H__
+#define __DC_MCIF_WB_DCN30_H__
+
+#include "dcn20/dcn20_mmhubbub.h"
+
+#define TO_DCN30_MMHUBBUB(mcif_wb_base) \
+ container_of(mcif_wb_base, struct dcn30_mmhubbub, base)
+
+/* DCN */
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define MCIF_WB_COMMON_REG_LIST_DCN3_0(inst) \
+ SRI(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUFMGR_STATUS, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_PITCH, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_1_STATUS, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_1_STATUS2, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_2_STATUS, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_2_STATUS2, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_3_STATUS, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_3_STATUS2, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_4_STATUS, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_4_STATUS2, MCIF_WB, inst),\
+ SRI(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB, inst),\
+ SRI(MCIF_WB_SCLK_CHANGE, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_1_ADDR_Y, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_1_ADDR_C, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_2_ADDR_Y, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_2_ADDR_C, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_3_ADDR_Y, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_3_ADDR_C, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_4_ADDR_C, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB, inst),\
+ SRI2(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, MMHUBBUB, inst),\
+ SRI(MCIF_WB_NB_PSTATE_CONTROL, MCIF_WB, inst),\
+ SRI2(MCIF_WB_WATERMARK, MMHUBBUB, inst),\
+ SRI(MCIF_WB_CLOCK_GATER_CONTROL, MCIF_WB, inst),\
+ SRI(MCIF_WB_SELF_REFRESH_CONTROL, MCIF_WB, inst),\
+ SRI(MULTI_LEVEL_QOS_CTRL, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_LUMA_SIZE, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_1_RESOLUTION, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_2_RESOLUTION, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_3_RESOLUTION, MCIF_WB, inst),\
+ SRI(MCIF_WB_BUF_4_RESOLUTION, MCIF_WB, inst),\
+ SRI2(MMHUBBUB_MEM_PWR_CNTL, MMHUBBUB, inst),\
+ SRI2(MMHUBBUB_WARMUP_ADDR_REGION, MMHUBBUB, inst),\
+ SRI2(MMHUBBUB_WARMUP_BASE_ADDR_HIGH, MMHUBBUB, inst),\
+ SRI2(MMHUBBUB_WARMUP_BASE_ADDR_LOW, MMHUBBUB, inst),\
+ SRI2(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB, inst),\
+ SRI2(MMHUBBUB_WARMUP_P_VMID, MMHUBBUB, inst),\
+ SRI(MCIF_WB_DRAM_SPEED_CHANGE_DURATION_VBI, MCIF_WB, inst)
+
+#define MCIF_WB_COMMON_REG_LIST_DCN30(inst) \
+ SRI2(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUFMGR_STATUS, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_PITCH, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_1_STATUS, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_1_STATUS2, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_2_STATUS, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_2_STATUS2, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_3_STATUS, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_3_STATUS2, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_4_STATUS, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_4_STATUS2, MCIF_WB, inst),\
+ SRI2(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB, inst),\
+ SRI2(MCIF_WB_SCLK_CHANGE, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_1_ADDR_Y, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_1_ADDR_C, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_2_ADDR_Y, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_2_ADDR_C, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_3_ADDR_Y, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_3_ADDR_C, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_4_ADDR_C, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB, inst),\
+ SRI2(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, MMHUBBUB, inst),\
+ SRI2(MCIF_WB_NB_PSTATE_CONTROL, MCIF_WB, inst),\
+ SRI2(MCIF_WB_WATERMARK, MMHUBBUB, inst),\
+ SRI2(MCIF_WB_CLOCK_GATER_CONTROL, MCIF_WB, inst),\
+ SRI2(MCIF_WB_SELF_REFRESH_CONTROL, MCIF_WB, inst),\
+ SRI2(MULTI_LEVEL_QOS_CTRL, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_LUMA_SIZE, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_1_RESOLUTION, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_2_RESOLUTION, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_3_RESOLUTION, MCIF_WB, inst),\
+ SRI2(MCIF_WB_BUF_4_RESOLUTION, MCIF_WB, inst),\
+ SRI2(MMHUBBUB_MEM_PWR_CNTL, MMHUBBUB, inst),\
+ SRI2(MMHUBBUB_WARMUP_ADDR_REGION, MMHUBBUB, inst),\
+ SRI2(MMHUBBUB_WARMUP_BASE_ADDR_HIGH, MMHUBBUB, inst),\
+ SRI2(MMHUBBUB_WARMUP_BASE_ADDR_LOW, MMHUBBUB, inst),\
+ SRI2(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB, inst),\
+ SRI2(MCIF_WB_DRAM_SPEED_CHANGE_DURATION_VBI, MCIF_WB, inst)
+
+#define MCIF_WB_COMMON_MASK_SH_LIST_DCN3_0(mask_sh) \
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_ENABLE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_INT_EN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_INT_ACK, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_SLICE_INT_EN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUF_ADDR_FENCE_EN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_VCE_INT_STATUS, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_SW_INT_STATUS, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_CUR_BUF, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_BUFTAG, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_CUR_LINE_L, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_NEXT_BUF, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_PITCH, MCIF_WB_BUF_LUMA_PITCH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_PITCH, MCIF_WB_BUF_CHROMA_PITCH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_ACTIVE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_SW_LOCKED, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_VCE_LOCKED, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_OVERFLOW, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_DISABLE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_MODE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_BUFTAG, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_NXT_BUF, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_CUR_LINE_L, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_NEW_CONTENT, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_COLOR_DEPTH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_TMZ_BLACK_PIXEL, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_TMZ, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_Y_OVERRUN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_C_OVERRUN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_ACTIVE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_SW_LOCKED, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_VCE_LOCKED, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_OVERFLOW, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_DISABLE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_MODE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_BUFTAG, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_NXT_BUF, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_CUR_LINE_L, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_NEW_CONTENT, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_COLOR_DEPTH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_TMZ_BLACK_PIXEL, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_TMZ, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_Y_OVERRUN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_C_OVERRUN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_ACTIVE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_SW_LOCKED, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_VCE_LOCKED, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_OVERFLOW, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_DISABLE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_MODE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_BUFTAG, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_NXT_BUF, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_CUR_LINE_L, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_NEW_CONTENT, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_COLOR_DEPTH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_TMZ_BLACK_PIXEL, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_TMZ, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_Y_OVERRUN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_C_OVERRUN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_ACTIVE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_SW_LOCKED, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_VCE_LOCKED, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_OVERFLOW, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_DISABLE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_MODE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_BUFTAG, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_NXT_BUF, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_CUR_LINE_L, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_NEW_CONTENT, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_COLOR_DEPTH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_TMZ_BLACK_PIXEL, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_TMZ, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_Y_OVERRUN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_C_OVERRUN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_CLIENT_ARBITRATION_SLICE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_TIME_PER_PIXEL, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_SCLK_CHANGE, WM_CHANGE_ACK_FORCE_ON, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_Y, MCIF_WB_BUF_1_ADDR_Y, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_C, MCIF_WB_BUF_1_ADDR_C, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_Y, MCIF_WB_BUF_2_ADDR_Y, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_C, MCIF_WB_BUF_2_ADDR_C, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_Y, MCIF_WB_BUF_3_ADDR_Y, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_C, MCIF_WB_BUF_3_ADDR_C, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y, MCIF_WB_BUF_4_ADDR_Y, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_C, MCIF_WB_BUF_4_ADDR_C, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK_IGNORE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_EN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_ACK, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_SLICE_SIZE, mask_sh),\
+ SF(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_REFRESH_WATERMARK, mask_sh),\
+ SF(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_URGENT_DURING_REQUEST, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_FORCE_ON, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_ALLOW_FOR_URGENT, mask_sh),\
+ SF(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, mask_sh),\
+ SF(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK_MASK, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_CLOCK_GATER_CONTROL, MCIF_WB_CLI_CLOCK_GATER_OVERRIDE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_SELF_REFRESH_CONTROL, DIS_REFRESH_UNDER_NBPREQ, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_SELF_REFRESH_CONTROL, PERFRAME_SELF_REFRESH, mask_sh),\
+ SF(MCIF_WB0_MULTI_LEVEL_QOS_CTRL, MAX_SCALED_TIME_TO_URGENT, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_LUMA_SIZE, MCIF_WB_BUF_LUMA_SIZE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB_BUF_CHROMA_SIZE, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB_BUF_1_ADDR_Y_HIGH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB_BUF_1_ADDR_C_HIGH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB_BUF_2_ADDR_Y_HIGH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB_BUF_2_ADDR_C_HIGH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB_BUF_3_ADDR_Y_HIGH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB_BUF_3_ADDR_C_HIGH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB_BUF_4_ADDR_Y_HIGH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB_BUF_4_ADDR_C_HIGH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_RESOLUTION, MCIF_WB_BUF_1_RESOLUTION_WIDTH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_1_RESOLUTION, MCIF_WB_BUF_1_RESOLUTION_HEIGHT, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_RESOLUTION, MCIF_WB_BUF_2_RESOLUTION_WIDTH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_2_RESOLUTION, MCIF_WB_BUF_2_RESOLUTION_HEIGHT, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_RESOLUTION, MCIF_WB_BUF_3_RESOLUTION_WIDTH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_3_RESOLUTION, MCIF_WB_BUF_3_RESOLUTION_HEIGHT, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_RESOLUTION, MCIF_WB_BUF_4_RESOLUTION_WIDTH, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_BUF_4_RESOLUTION, MCIF_WB_BUF_4_RESOLUTION_HEIGHT, mask_sh),\
+ SF(MMHUBBUB_MEM_PWR_CNTL, WBIF_WHOLE_BUF_MODE, mask_sh),\
+ SF(MMHUBBUB_WARMUP_ADDR_REGION, MMHUBBUB_WARMUP_ADDR_REGION, mask_sh),\
+ SF(MMHUBBUB_WARMUP_BASE_ADDR_HIGH, MMHUBBUB_WARMUP_BASE_ADDR_HIGH, mask_sh),\
+ SF(MMHUBBUB_WARMUP_BASE_ADDR_LOW, MMHUBBUB_WARMUP_BASE_ADDR_LOW, mask_sh),\
+ SF(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_EN, mask_sh),\
+ SF(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_SW_INT_EN, mask_sh),\
+ SF(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_SW_INT_STATUS, mask_sh),\
+ SF(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_SW_INT_ACK, mask_sh),\
+ SF(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_INC_ADDR, mask_sh),\
+ SF(MMHUBBUB_WARMUP_P_VMID, MMHUBBUB_WARMUP_P_VMID, mask_sh),\
+ SF(MCIF_WB0_MCIF_WB_DRAM_SPEED_CHANGE_DURATION_VBI, MCIF_WB_DRAM_SPEED_CHANGE_DURATION_VBI, mask_sh)
+
+
+#define MCIF_WB_COMMON_MASK_SH_LIST_DCN30(mask_sh) \
+ SF(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_ENABLE, mask_sh),\
+ SF(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_INT_EN, mask_sh),\
+ SF(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_INT_ACK, mask_sh),\
+ SF(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_SLICE_INT_EN, mask_sh),\
+ SF(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN, mask_sh),\
+ SF(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, mask_sh),\
+ SF(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUF_ADDR_FENCE_EN, mask_sh),\
+ SF(MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_VCE_INT_STATUS, mask_sh),\
+ SF(MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_SW_INT_STATUS, mask_sh),\
+ SF(MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS, mask_sh),\
+ SF(MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_CUR_BUF, mask_sh),\
+ SF(MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_BUFTAG, mask_sh),\
+ SF(MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_CUR_LINE_L, mask_sh),\
+ SF(MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_NEXT_BUF, mask_sh),\
+ SF(MCIF_WB_BUF_PITCH, MCIF_WB_BUF_LUMA_PITCH, mask_sh),\
+ SF(MCIF_WB_BUF_PITCH, MCIF_WB_BUF_CHROMA_PITCH, mask_sh),\
+ SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_ACTIVE, mask_sh),\
+ SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_SW_LOCKED, mask_sh),\
+ SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_VCE_LOCKED, mask_sh),\
+ SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_OVERFLOW, mask_sh),\
+ SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_DISABLE, mask_sh),\
+ SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_MODE, mask_sh),\
+ SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_BUFTAG, mask_sh),\
+ SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_NXT_BUF, mask_sh),\
+ SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_CUR_LINE_L, mask_sh),\
+ SF(MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_NEW_CONTENT, mask_sh),\
+ SF(MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_COLOR_DEPTH, mask_sh),\
+ SF(MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_TMZ_BLACK_PIXEL, mask_sh),\
+ SF(MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_TMZ, mask_sh),\
+ SF(MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_Y_OVERRUN, mask_sh),\
+ SF(MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_C_OVERRUN, mask_sh),\
+ SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_ACTIVE, mask_sh),\
+ SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_SW_LOCKED, mask_sh),\
+ SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_VCE_LOCKED, mask_sh),\
+ SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_OVERFLOW, mask_sh),\
+ SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_DISABLE, mask_sh),\
+ SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_MODE, mask_sh),\
+ SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_BUFTAG, mask_sh),\
+ SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_NXT_BUF, mask_sh),\
+ SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_CUR_LINE_L, mask_sh),\
+ SF(MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_NEW_CONTENT, mask_sh),\
+ SF(MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_COLOR_DEPTH, mask_sh),\
+ SF(MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_TMZ_BLACK_PIXEL, mask_sh),\
+ SF(MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_TMZ, mask_sh),\
+ SF(MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_Y_OVERRUN, mask_sh),\
+ SF(MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_C_OVERRUN, mask_sh),\
+ SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_ACTIVE, mask_sh),\
+ SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_SW_LOCKED, mask_sh),\
+ SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_VCE_LOCKED, mask_sh),\
+ SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_OVERFLOW, mask_sh),\
+ SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_DISABLE, mask_sh),\
+ SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_MODE, mask_sh),\
+ SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_BUFTAG, mask_sh),\
+ SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_NXT_BUF, mask_sh),\
+ SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_CUR_LINE_L, mask_sh),\
+ SF(MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_NEW_CONTENT, mask_sh),\
+ SF(MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_COLOR_DEPTH, mask_sh),\
+ SF(MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_TMZ_BLACK_PIXEL, mask_sh),\
+ SF(MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_TMZ, mask_sh),\
+ SF(MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_Y_OVERRUN, mask_sh),\
+ SF(MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_C_OVERRUN, mask_sh),\
+ SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_ACTIVE, mask_sh),\
+ SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_SW_LOCKED, mask_sh),\
+ SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_VCE_LOCKED, mask_sh),\
+ SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_OVERFLOW, mask_sh),\
+ SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_DISABLE, mask_sh),\
+ SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_MODE, mask_sh),\
+ SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_BUFTAG, mask_sh),\
+ SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_NXT_BUF, mask_sh),\
+ SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_CUR_LINE_L, mask_sh),\
+ SF(MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_NEW_CONTENT, mask_sh),\
+ SF(MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_COLOR_DEPTH, mask_sh),\
+ SF(MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_TMZ_BLACK_PIXEL, mask_sh),\
+ SF(MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_TMZ, mask_sh),\
+ SF(MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_Y_OVERRUN, mask_sh),\
+ SF(MCIF_WB_BUF_4_STATUS2, MCIF_WB_BUF_4_C_OVERRUN, mask_sh),\
+ SF(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_CLIENT_ARBITRATION_SLICE, mask_sh),\
+ SF(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB_TIME_PER_PIXEL, mask_sh),\
+ SF(MCIF_WB_SCLK_CHANGE, WM_CHANGE_ACK_FORCE_ON, mask_sh),\
+ SF(MCIF_WB_BUF_1_ADDR_Y, MCIF_WB_BUF_1_ADDR_Y, mask_sh),\
+ SF(MCIF_WB_BUF_1_ADDR_C, MCIF_WB_BUF_1_ADDR_C, mask_sh),\
+ SF(MCIF_WB_BUF_2_ADDR_Y, MCIF_WB_BUF_2_ADDR_Y, mask_sh),\
+ SF(MCIF_WB_BUF_2_ADDR_C, MCIF_WB_BUF_2_ADDR_C, mask_sh),\
+ SF(MCIF_WB_BUF_3_ADDR_Y, MCIF_WB_BUF_3_ADDR_Y, mask_sh),\
+ SF(MCIF_WB_BUF_3_ADDR_C, MCIF_WB_BUF_3_ADDR_C, mask_sh),\
+ SF(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB_BUF_4_ADDR_Y, mask_sh),\
+ SF(MCIF_WB_BUF_4_ADDR_C, MCIF_WB_BUF_4_ADDR_C, mask_sh),\
+ SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK_IGNORE, mask_sh),\
+ SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_EN, mask_sh),\
+ SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_ACK, mask_sh),\
+ SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, mask_sh),\
+ SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK, mask_sh),\
+ SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_SLICE_SIZE, mask_sh),\
+ SF(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_REFRESH_WATERMARK, mask_sh),\
+ SF(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\
+ SF(MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_URGENT_DURING_REQUEST, mask_sh),\
+ SF(MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_CHANGE_FORCE_ON, mask_sh),\
+ SF(MCIF_WB_NB_PSTATE_CONTROL, NB_PSTATE_ALLOW_FOR_URGENT, mask_sh),\
+ SF(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK, mask_sh),\
+ SF(MCIF_WB_WATERMARK, MCIF_WB_CLI_WATERMARK_MASK, mask_sh),\
+ SF(MCIF_WB_CLOCK_GATER_CONTROL, MCIF_WB_CLI_CLOCK_GATER_OVERRIDE, mask_sh),\
+ SF(MCIF_WB_SELF_REFRESH_CONTROL, DIS_REFRESH_UNDER_NBPREQ, mask_sh),\
+ SF(MCIF_WB_SELF_REFRESH_CONTROL, PERFRAME_SELF_REFRESH, mask_sh),\
+ SF(MULTI_LEVEL_QOS_CTRL, MAX_SCALED_TIME_TO_URGENT, mask_sh),\
+ SF(MCIF_WB_BUF_LUMA_SIZE, MCIF_WB_BUF_LUMA_SIZE, mask_sh),\
+ SF(MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB_BUF_CHROMA_SIZE, mask_sh),\
+ SF(MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB_BUF_1_ADDR_Y_HIGH, mask_sh),\
+ SF(MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB_BUF_1_ADDR_C_HIGH, mask_sh),\
+ SF(MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB_BUF_2_ADDR_Y_HIGH, mask_sh),\
+ SF(MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB_BUF_2_ADDR_C_HIGH, mask_sh),\
+ SF(MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB_BUF_3_ADDR_Y_HIGH, mask_sh),\
+ SF(MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB_BUF_3_ADDR_C_HIGH, mask_sh),\
+ SF(MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB_BUF_4_ADDR_Y_HIGH, mask_sh),\
+ SF(MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB_BUF_4_ADDR_C_HIGH, mask_sh),\
+ SF(MCIF_WB_BUF_1_RESOLUTION, MCIF_WB_BUF_1_RESOLUTION_WIDTH, mask_sh),\
+ SF(MCIF_WB_BUF_1_RESOLUTION, MCIF_WB_BUF_1_RESOLUTION_HEIGHT, mask_sh),\
+ SF(MCIF_WB_BUF_2_RESOLUTION, MCIF_WB_BUF_2_RESOLUTION_WIDTH, mask_sh),\
+ SF(MCIF_WB_BUF_2_RESOLUTION, MCIF_WB_BUF_2_RESOLUTION_HEIGHT, mask_sh),\
+ SF(MCIF_WB_BUF_3_RESOLUTION, MCIF_WB_BUF_3_RESOLUTION_WIDTH, mask_sh),\
+ SF(MCIF_WB_BUF_3_RESOLUTION, MCIF_WB_BUF_3_RESOLUTION_HEIGHT, mask_sh),\
+ SF(MCIF_WB_BUF_4_RESOLUTION, MCIF_WB_BUF_4_RESOLUTION_WIDTH, mask_sh),\
+ SF(MCIF_WB_BUF_4_RESOLUTION, MCIF_WB_BUF_4_RESOLUTION_HEIGHT, mask_sh),\
+ SF(MMHUBBUB_WARMUP_ADDR_REGION, MMHUBBUB_WARMUP_ADDR_REGION, mask_sh),\
+ SF(MMHUBBUB_WARMUP_BASE_ADDR_HIGH, MMHUBBUB_WARMUP_BASE_ADDR_HIGH, mask_sh),\
+ SF(MMHUBBUB_WARMUP_BASE_ADDR_LOW, MMHUBBUB_WARMUP_BASE_ADDR_LOW, mask_sh),\
+ SF(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_EN, mask_sh),\
+ SF(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_SW_INT_EN, mask_sh),\
+ SF(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_SW_INT_STATUS, mask_sh),\
+ SF(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_SW_INT_ACK, mask_sh),\
+ SF(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_INC_ADDR, mask_sh),\
+ SF(MCIF_WB_DRAM_SPEED_CHANGE_DURATION_VBI, MCIF_WB_DRAM_SPEED_CHANGE_DURATION_VBI, mask_sh)
+
+
+#define MCIF_WB_REG_FIELD_LIST_DCN3_0(type) \
+ MCIF_WB_REG_FIELD_LIST_DCN2_0(type);\
+ type WBIF_WHOLE_BUF_MODE;\
+ type MMHUBBUB_WARMUP_ADDR_REGION;\
+ type MMHUBBUB_WARMUP_BASE_ADDR_HIGH;\
+ type MMHUBBUB_WARMUP_BASE_ADDR_LOW;\
+ type MMHUBBUB_WARMUP_EN;\
+ type MMHUBBUB_WARMUP_SW_INT_EN;\
+ type MMHUBBUB_WARMUP_SW_INT_STATUS;\
+ type MMHUBBUB_WARMUP_SW_INT_ACK;\
+ type MMHUBBUB_WARMUP_INC_ADDR;\
+ type MMHUBBUB_WARMUP_P_VMID;\
+ type MCIF_WB_DRAM_SPEED_CHANGE_DURATION_VBI
+
+#define MCIF_WB_REG_VARIABLE_LIST_DCN3_0 \
+ MCIF_WB_REG_VARIABLE_LIST_DCN2_0; \
+ uint32_t MMHUBBUB_MEM_PWR_CNTL;\
+ uint32_t MMHUBBUB_WARMUP_ADDR_REGION;\
+ uint32_t MMHUBBUB_WARMUP_BASE_ADDR_HIGH;\
+ uint32_t MMHUBBUB_WARMUP_BASE_ADDR_LOW;\
+ uint32_t MMHUBBUB_WARMUP_CONTROL_STATUS;\
+ uint32_t MMHUBBUB_WARMUP_P_VMID;\
+ uint32_t MCIF_WB_DRAM_SPEED_CHANGE_DURATION_VBI
+
+struct dcn30_mmhubbub_registers {
+ MCIF_WB_REG_VARIABLE_LIST_DCN3_0;
+};
+
+
+struct dcn30_mmhubbub_mask {
+ MCIF_WB_REG_FIELD_LIST_DCN3_0(uint32_t);
+};
+
+struct dcn30_mmhubbub_shift {
+ MCIF_WB_REG_FIELD_LIST_DCN3_0(uint8_t);
+};
+
+struct dcn30_mmhubbub {
+ struct mcif_wb base;
+ const struct dcn30_mmhubbub_registers *mcif_wb_regs;
+ const struct dcn30_mmhubbub_shift *mcif_wb_shift;
+ const struct dcn30_mmhubbub_mask *mcif_wb_mask;
+};
+
+void dcn30_mmhubbub_construct(struct dcn30_mmhubbub *mcif_wb30,
+ struct dc_context *ctx,
+ const struct dcn30_mmhubbub_registers *mcif_wb_regs,
+ const struct dcn30_mmhubbub_shift *mcif_wb_shift,
+ const struct dcn30_mmhubbub_mask *mcif_wb_mask,
+ int inst);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
new file mode 100644
index 000000000000..8fadd61a55ec
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
@@ -0,0 +1,1409 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dcn30_mpc.h"
+#include "dcn30_cm_common.h"
+#include "basics/conversion.h"
+#include "dcn10/dcn10_cm_common.h"
+#include "dc.h"
+
+#define REG(reg)\
+ mpc30->mpc_regs->reg
+
+#define CTX \
+ mpc30->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ mpc30->mpc_shift->field_name, mpc30->mpc_mask->field_name
+
+
+#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+
+
+static bool mpc3_is_dwb_idle(
+ struct mpc *mpc,
+ int dwb_id)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+ unsigned int status;
+
+ REG_GET(DWB_MUX[dwb_id], MPC_DWB0_MUX_STATUS, &status);
+
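+	/* 0xf is the "disconnected" mux selection (see mpc3_disable_dwb_mux below), i.e. the DWB path is idle */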
+ if (status == 0xf)
+ return true;
+ else
+ return false;
+}
+
+static void mpc3_set_dwb_mux(
+ struct mpc *mpc,
+ int dwb_id,
+ int mpcc_id)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ REG_SET(DWB_MUX[dwb_id], 0,
+ MPC_DWB0_MUX, mpcc_id);
+}
+
+static void mpc3_disable_dwb_mux(
+ struct mpc *mpc,
+ int dwb_id)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ REG_SET(DWB_MUX[dwb_id], 0,
+ MPC_DWB0_MUX, 0xf);
+}
+
+static void mpc3_set_out_rate_control(
+ struct mpc *mpc,
+ int opp_id,
+ bool enable,
+ bool rate_2x_mode,
+ struct mpc_dwb_flow_control *flow_control)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ REG_UPDATE_2(MUX[opp_id],
+ MPC_OUT_RATE_CONTROL_DISABLE, !enable,
+ MPC_OUT_RATE_CONTROL, rate_2x_mode);
+
+ if (flow_control)
+ REG_UPDATE_2(MUX[opp_id],
+ MPC_OUT_FLOW_CONTROL_MODE, flow_control->flow_ctrl_mode,
+ MPC_OUT_FLOW_CONTROL_COUNT, flow_control->flow_ctrl_cnt1);
+}
+
+static enum dc_lut_mode mpc3_get_ogam_current(struct mpc *mpc, int mpcc_id)
+{
+	/* Unlike DCN1 and DCN2, where a single status register field holds this info,
+	 * DCN3/3AG requires reading two separate fields to retrieve it.
+	 */
+ enum dc_lut_mode mode;
+ uint32_t state_mode;
+ uint32_t state_ram_lut_in_use;
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ REG_GET_2(MPCC_OGAM_CONTROL[mpcc_id],
+ MPCC_OGAM_MODE_CURRENT, &state_mode,
+ MPCC_OGAM_SELECT_CURRENT, &state_ram_lut_in_use);
+
+ switch (state_mode) {
+ case 0:
+ mode = LUT_BYPASS;
+ break;
+ case 2:
+ switch (state_ram_lut_in_use) {
+ case 0:
+ mode = LUT_RAM_A;
+ break;
+ case 1:
+ mode = LUT_RAM_B;
+ break;
+ default:
+ mode = LUT_BYPASS;
+ break;
+ }
+ break;
+ default:
+ mode = LUT_BYPASS;
+ break;
+ }
+ return mode;
+}
+
+static void mpc3_power_on_ogam_lut(
+ struct mpc *mpc, int mpcc_id,
+ bool power_on)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ REG_SET(MPCC_MEM_PWR_CTRL[mpcc_id], 0,
+ MPCC_OGAM_MEM_PWR_FORCE, power_on == true ? 0:1);
+}
+
+static void mpc3_configure_ogam_lut(
+ struct mpc *mpc, int mpcc_id,
+ bool is_ram_a)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ REG_UPDATE_2(MPCC_OGAM_LUT_CONTROL[mpcc_id],
+ MPCC_OGAM_LUT_WRITE_COLOR_MASK, 7,
+ MPCC_OGAM_LUT_HOST_SEL, is_ram_a == true ? 0:1);
+
+ REG_SET(MPCC_OGAM_LUT_INDEX[mpcc_id], 0, MPCC_OGAM_LUT_INDEX, 0);
+}
+
+static void mpc3_ogam_get_reg_field(
+ struct mpc *mpc,
+ struct dcn3_xfer_func_reg *reg)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ reg->shifts.field_region_start_base = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B;
+ reg->masks.field_region_start_base = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B;
+ reg->shifts.field_offset = mpc30->mpc_shift->MPCC_OGAM_RAMA_OFFSET_B;
+ reg->masks.field_offset = mpc30->mpc_mask->MPCC_OGAM_RAMA_OFFSET_B;
+
+ reg->shifts.exp_region0_lut_offset = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->masks.exp_region0_lut_offset = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->shifts.exp_region0_num_segments = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->masks.exp_region0_num_segments = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->shifts.exp_region1_lut_offset = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->masks.exp_region1_lut_offset = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->shifts.exp_region1_num_segments = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
+ reg->masks.exp_region1_num_segments = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
+
+ reg->shifts.field_region_end = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_END_B;
+ reg->masks.field_region_end = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_END_B;
+ reg->shifts.field_region_end_slope = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B;
+ reg->masks.field_region_end_slope = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B;
+ reg->shifts.field_region_end_base = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B;
+ reg->masks.field_region_end_base = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B;
+ reg->shifts.field_region_linear_slope = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B;
+ reg->masks.field_region_linear_slope = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B;
+ reg->shifts.exp_region_start = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_START_B;
+ reg->masks.exp_region_start = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_B;
+ reg->shifts.exp_resion_start_segment = mpc30->mpc_shift->MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;
+ reg->masks.exp_resion_start_segment = mpc30->mpc_mask->MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;
+}
+
+static void mpc3_program_luta(struct mpc *mpc, int mpcc_id,
+ const struct pwl_params *params)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+ struct dcn3_xfer_func_reg gam_regs;
+
+ mpc3_ogam_get_reg_field(mpc, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(MPCC_OGAM_RAMA_START_CNTL_B[mpcc_id]);
+ gam_regs.start_cntl_g = REG(MPCC_OGAM_RAMA_START_CNTL_G[mpcc_id]);
+ gam_regs.start_cntl_r = REG(MPCC_OGAM_RAMA_START_CNTL_R[mpcc_id]);
+ gam_regs.start_slope_cntl_b = REG(MPCC_OGAM_RAMA_START_SLOPE_CNTL_B[mpcc_id]);
+ gam_regs.start_slope_cntl_g = REG(MPCC_OGAM_RAMA_START_SLOPE_CNTL_G[mpcc_id]);
+ gam_regs.start_slope_cntl_r = REG(MPCC_OGAM_RAMA_START_SLOPE_CNTL_R[mpcc_id]);
+ gam_regs.start_end_cntl1_b = REG(MPCC_OGAM_RAMA_END_CNTL1_B[mpcc_id]);
+ gam_regs.start_end_cntl2_b = REG(MPCC_OGAM_RAMA_END_CNTL2_B[mpcc_id]);
+ gam_regs.start_end_cntl1_g = REG(MPCC_OGAM_RAMA_END_CNTL1_G[mpcc_id]);
+ gam_regs.start_end_cntl2_g = REG(MPCC_OGAM_RAMA_END_CNTL2_G[mpcc_id]);
+ gam_regs.start_end_cntl1_r = REG(MPCC_OGAM_RAMA_END_CNTL1_R[mpcc_id]);
+ gam_regs.start_end_cntl2_r = REG(MPCC_OGAM_RAMA_END_CNTL2_R[mpcc_id]);
+ gam_regs.region_start = REG(MPCC_OGAM_RAMA_REGION_0_1[mpcc_id]);
+ gam_regs.region_end = REG(MPCC_OGAM_RAMA_REGION_32_33[mpcc_id]);
+ //New registers in DCN3AG/DCN OGAM block
+ gam_regs.offset_b = REG(MPCC_OGAM_RAMA_OFFSET_B[mpcc_id]);
+ gam_regs.offset_g = REG(MPCC_OGAM_RAMA_OFFSET_G[mpcc_id]);
+ gam_regs.offset_r = REG(MPCC_OGAM_RAMA_OFFSET_R[mpcc_id]);
+ gam_regs.start_base_cntl_b = REG(MPCC_OGAM_RAMA_START_BASE_CNTL_B[mpcc_id]);
+ gam_regs.start_base_cntl_g = REG(MPCC_OGAM_RAMA_START_BASE_CNTL_G[mpcc_id]);
+ gam_regs.start_base_cntl_r = REG(MPCC_OGAM_RAMA_START_BASE_CNTL_R[mpcc_id]);
+
+ cm_helper_program_gamcor_xfer_func(mpc30->base.ctx, params, &gam_regs);
+}
+
+static void mpc3_program_lutb(struct mpc *mpc, int mpcc_id,
+ const struct pwl_params *params)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+ struct dcn3_xfer_func_reg gam_regs;
+
+ mpc3_ogam_get_reg_field(mpc, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(MPCC_OGAM_RAMB_START_CNTL_B[mpcc_id]);
+ gam_regs.start_cntl_g = REG(MPCC_OGAM_RAMB_START_CNTL_G[mpcc_id]);
+ gam_regs.start_cntl_r = REG(MPCC_OGAM_RAMB_START_CNTL_R[mpcc_id]);
+ gam_regs.start_slope_cntl_b = REG(MPCC_OGAM_RAMB_START_SLOPE_CNTL_B[mpcc_id]);
+ gam_regs.start_slope_cntl_g = REG(MPCC_OGAM_RAMB_START_SLOPE_CNTL_G[mpcc_id]);
+ gam_regs.start_slope_cntl_r = REG(MPCC_OGAM_RAMB_START_SLOPE_CNTL_R[mpcc_id]);
+ gam_regs.start_end_cntl1_b = REG(MPCC_OGAM_RAMB_END_CNTL1_B[mpcc_id]);
+ gam_regs.start_end_cntl2_b = REG(MPCC_OGAM_RAMB_END_CNTL2_B[mpcc_id]);
+ gam_regs.start_end_cntl1_g = REG(MPCC_OGAM_RAMB_END_CNTL1_G[mpcc_id]);
+ gam_regs.start_end_cntl2_g = REG(MPCC_OGAM_RAMB_END_CNTL2_G[mpcc_id]);
+ gam_regs.start_end_cntl1_r = REG(MPCC_OGAM_RAMB_END_CNTL1_R[mpcc_id]);
+ gam_regs.start_end_cntl2_r = REG(MPCC_OGAM_RAMB_END_CNTL2_R[mpcc_id]);
+ gam_regs.region_start = REG(MPCC_OGAM_RAMB_REGION_0_1[mpcc_id]);
+ gam_regs.region_end = REG(MPCC_OGAM_RAMB_REGION_32_33[mpcc_id]);
+ //New registers in DCN3AG/DCN OGAM block
+ gam_regs.offset_b = REG(MPCC_OGAM_RAMB_OFFSET_B[mpcc_id]);
+ gam_regs.offset_g = REG(MPCC_OGAM_RAMB_OFFSET_G[mpcc_id]);
+ gam_regs.offset_r = REG(MPCC_OGAM_RAMB_OFFSET_R[mpcc_id]);
+ gam_regs.start_base_cntl_b = REG(MPCC_OGAM_RAMB_START_BASE_CNTL_B[mpcc_id]);
+ gam_regs.start_base_cntl_g = REG(MPCC_OGAM_RAMB_START_BASE_CNTL_G[mpcc_id]);
+ gam_regs.start_base_cntl_r = REG(MPCC_OGAM_RAMB_START_BASE_CNTL_R[mpcc_id]);
+
+ cm_helper_program_gamcor_xfer_func(mpc30->base.ctx, params, &gam_regs);
+}
+
+
+static void mpc3_program_ogam_pwl(
+ struct mpc *mpc, int mpcc_id,
+ const struct pwl_result_data *rgb,
+ uint32_t num)
+{
+ uint32_t i;
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+ uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg;
+ uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg;
+ uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg;
+
+	/* the entries of DCN3AG gamma LUTs take 18-bit base values as opposed to
+	 * the 38-bit base+delta values per entry used in earlier DCN architectures;
+	 * the last base value for our LUT is computed by adding the last base value
+	 * in our data to the last delta
+	 */
+
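+	/* If the R, G and B curves are identical, a single pass with write color mask 7
+	 * (set in mpc3_configure_ogam_lut) fills all three channels at once; otherwise
+	 * each channel is written separately with masks 4, 2 and 1.
+	 */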
+ if (is_rgb_equal(rgb, num)) {
+ for (i = 0 ; i < num; i++)
+ REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].red_reg);
+
+ REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, last_base_value_red);
+
+ } else {
+
+ REG_UPDATE(MPCC_OGAM_LUT_CONTROL[mpcc_id],
+ MPCC_OGAM_LUT_WRITE_COLOR_MASK, 4);
+
+ for (i = 0 ; i < num; i++)
+ REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].red_reg);
+
+ REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, last_base_value_red);
+
+ REG_SET(MPCC_OGAM_LUT_INDEX[mpcc_id], 0, MPCC_OGAM_LUT_INDEX, 0);
+
+ REG_UPDATE(MPCC_OGAM_LUT_CONTROL[mpcc_id],
+ MPCC_OGAM_LUT_WRITE_COLOR_MASK, 2);
+
+ for (i = 0 ; i < num; i++)
+ REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].green_reg);
+
+ REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, last_base_value_green);
+
+ REG_SET(MPCC_OGAM_LUT_INDEX[mpcc_id], 0, MPCC_OGAM_LUT_INDEX, 0);
+
+ REG_UPDATE(MPCC_OGAM_LUT_CONTROL[mpcc_id],
+ MPCC_OGAM_LUT_WRITE_COLOR_MASK, 1);
+
+ for (i = 0 ; i < num; i++)
+ REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].blue_reg);
+
+ REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, last_base_value_blue);
+ }
+
+}
+
+void mpc3_set_output_gamma(
+ struct mpc *mpc,
+ int mpcc_id,
+ const struct pwl_params *params)
+{
+ enum dc_lut_mode current_mode;
+ enum dc_lut_mode next_mode;
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ if (mpc->ctx->dc->debug.cm_in_bypass) {
+ REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0);
+ return;
+ }
+
+ if (params == NULL) { //disable OGAM
+ REG_SET(MPCC_OGAM_CONTROL[mpcc_id], 0, MPCC_OGAM_MODE, 0);
+ return;
+ }
+ //enable OGAM
+ REG_SET(MPCC_OGAM_CONTROL[mpcc_id], 0, MPCC_OGAM_MODE, 2);
+
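+	/* the OGAM LUT is double buffered: program the RAM bank not currently in use,
+	 * then switch MPCC_OGAM_SELECT to it once programming is done
+	 */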
+ current_mode = mpc3_get_ogam_current(mpc, mpcc_id);
+ if (current_mode == LUT_BYPASS)
+ next_mode = LUT_RAM_A;
+ else if (current_mode == LUT_RAM_A)
+ next_mode = LUT_RAM_B;
+ else
+ next_mode = LUT_RAM_A;
+
+ mpc3_power_on_ogam_lut(mpc, mpcc_id, true);
+ mpc3_configure_ogam_lut(mpc, mpcc_id, next_mode == LUT_RAM_A ? true:false);
+
+ if (next_mode == LUT_RAM_A)
+ mpc3_program_luta(mpc, mpcc_id, params);
+ else
+ mpc3_program_lutb(mpc, mpcc_id, params);
+
+ mpc3_program_ogam_pwl(
+ mpc, mpcc_id, params->rgb_resulted, params->hw_points_num);
+
+	/* we need to program 2 fields here as opposed to 1 */
+ REG_UPDATE(MPCC_OGAM_CONTROL[mpcc_id],
+ MPCC_OGAM_SELECT, next_mode == LUT_RAM_A ? 0:1);
+}
+
+void mpc3_set_denorm(
+ struct mpc *mpc,
+ int opp_id,
+ enum dc_color_depth output_depth)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+	/* De-normalize Fixed U1.13 color data to different target bit depths. 0 is bypass */
+ int denorm_mode = 0;
+
+ switch (output_depth) {
+ case COLOR_DEPTH_666:
+ denorm_mode = 1;
+ break;
+ case COLOR_DEPTH_888:
+ denorm_mode = 2;
+ break;
+ case COLOR_DEPTH_999:
+ denorm_mode = 3;
+ break;
+ case COLOR_DEPTH_101010:
+ denorm_mode = 4;
+ break;
+ case COLOR_DEPTH_111111:
+ denorm_mode = 5;
+ break;
+ case COLOR_DEPTH_121212:
+ denorm_mode = 6;
+ break;
+ case COLOR_DEPTH_141414:
+ case COLOR_DEPTH_161616:
+ default:
+ /* not a valid use case! */
+ break;
+ }
+
+ REG_UPDATE(DENORM_CONTROL[opp_id],
+ MPC_OUT_DENORM_MODE, denorm_mode);
+}
+
+void mpc3_set_denorm_clamp(
+ struct mpc *mpc,
+ int opp_id,
+ struct mpc_denorm_clamp denorm_clamp)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ /*program min and max clamp values for the pixel components*/
+ REG_UPDATE_2(DENORM_CONTROL[opp_id],
+ MPC_OUT_DENORM_CLAMP_MAX_R_CR, denorm_clamp.clamp_max_r_cr,
+ MPC_OUT_DENORM_CLAMP_MIN_R_CR, denorm_clamp.clamp_min_r_cr);
+ REG_UPDATE_2(DENORM_CLAMP_G_Y[opp_id],
+ MPC_OUT_DENORM_CLAMP_MAX_G_Y, denorm_clamp.clamp_max_g_y,
+ MPC_OUT_DENORM_CLAMP_MIN_G_Y, denorm_clamp.clamp_min_g_y);
+ REG_UPDATE_2(DENORM_CLAMP_B_CB[opp_id],
+ MPC_OUT_DENORM_CLAMP_MAX_B_CB, denorm_clamp.clamp_max_b_cb,
+ MPC_OUT_DENORM_CLAMP_MIN_B_CB, denorm_clamp.clamp_min_b_cb);
+}
+
+static enum dc_lut_mode mpc3_get_shaper_current(struct mpc *mpc, uint32_t rmu_idx)
+{
+ enum dc_lut_mode mode;
+ uint32_t state_mode;
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ REG_GET(SHAPER_CONTROL[rmu_idx],
+ MPC_RMU_SHAPER_LUT_MODE_CURRENT, &state_mode);
+
+ switch (state_mode) {
+ case 0:
+ mode = LUT_BYPASS;
+ break;
+ case 1:
+ mode = LUT_RAM_A;
+ break;
+ case 2:
+ mode = LUT_RAM_B;
+ break;
+ default:
+ mode = LUT_BYPASS;
+ break;
+ }
+ return mode;
+}
+
+static void mpc3_configure_shaper_lut(
+ struct mpc *mpc,
+ bool is_ram_a,
+ uint32_t rmu_idx)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ REG_UPDATE(SHAPER_LUT_WRITE_EN_MASK[rmu_idx],
+ MPC_RMU_SHAPER_LUT_WRITE_EN_MASK, 7);
+ REG_UPDATE(SHAPER_LUT_WRITE_EN_MASK[rmu_idx],
+ MPC_RMU_SHAPER_LUT_WRITE_SEL, is_ram_a == true ? 0:1);
+ REG_SET(SHAPER_LUT_INDEX[rmu_idx], 0, MPC_RMU_SHAPER_LUT_INDEX, 0);
+}
+
+static void mpc3_program_shaper_luta_settings(
+ struct mpc *mpc,
+ const struct pwl_params *params,
+ uint32_t rmu_idx)
+{
+ const struct gamma_curve *curve;
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ REG_SET_2(SHAPER_RAMA_START_CNTL_B[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
+ REG_SET_2(SHAPER_RAMA_START_CNTL_G[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].green.custom_float_x,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
+ REG_SET_2(SHAPER_RAMA_START_CNTL_R[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].red.custom_float_x,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
+
+ REG_SET_2(SHAPER_RAMA_END_CNTL_B[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y);
+ REG_SET_2(SHAPER_RAMA_END_CNTL_G[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].green.custom_float_x,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].green.custom_float_y);
+ REG_SET_2(SHAPER_RAMA_END_CNTL_R[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].red.custom_float_x,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y);
+
+ curve = params->arr_curve_points;
+ REG_SET_4(SHAPER_RAMA_REGION_0_1[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMA_REGION_2_3[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMA_REGION_4_5[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMA_REGION_6_7[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMA_REGION_8_9[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMA_REGION_10_11[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMA_REGION_12_13[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMA_REGION_14_15[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMA_REGION_16_17[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMA_REGION_18_19[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMA_REGION_20_21[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMA_REGION_22_23[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMA_REGION_24_25[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMA_REGION_26_27[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMA_REGION_28_29[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMA_REGION_30_31[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMA_REGION_32_33[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+}
+
+static void mpc3_program_shaper_lutb_settings(
+ struct mpc *mpc,
+ const struct pwl_params *params,
+ uint32_t rmu_idx)
+{
+ const struct gamma_curve *curve;
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ REG_SET_2(SHAPER_RAMB_START_CNTL_B[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
+ REG_SET_2(SHAPER_RAMB_START_CNTL_G[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].green.custom_float_x,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
+ REG_SET_2(SHAPER_RAMB_START_CNTL_R[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].red.custom_float_x,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
+
+ REG_SET_2(SHAPER_RAMB_END_CNTL_B[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y);
+ REG_SET_2(SHAPER_RAMB_END_CNTL_G[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].green.custom_float_x,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].green.custom_float_y);
+ REG_SET_2(SHAPER_RAMB_END_CNTL_R[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].red.custom_float_x,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].red.custom_float_y);
+
+ curve = params->arr_curve_points;
+ REG_SET_4(SHAPER_RAMB_REGION_0_1[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMB_REGION_2_3[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMB_REGION_4_5[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMB_REGION_6_7[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMB_REGION_8_9[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMB_REGION_10_11[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMB_REGION_12_13[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMB_REGION_14_15[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMB_REGION_16_17[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMB_REGION_18_19[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMB_REGION_20_21[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMB_REGION_22_23[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMB_REGION_24_25[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMB_REGION_26_27[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMB_REGION_28_29[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMB_REGION_30_31[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(SHAPER_RAMB_REGION_32_33[rmu_idx], 0,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+}
+
+
+static void mpc3_program_shaper_lut(
+ struct mpc *mpc,
+ const struct pwl_result_data *rgb,
+ uint32_t num,
+ uint32_t rmu_idx)
+{
+ uint32_t i, red, green, blue;
+ uint32_t red_delta, green_delta, blue_delta;
+ uint32_t red_value, green_value, blue_value;
+
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ for (i = 0 ; i < num; i++) {
+
+ red = rgb[i].red_reg;
+ green = rgb[i].green_reg;
+ blue = rgb[i].blue_reg;
+
+ red_delta = rgb[i].delta_red_reg;
+ green_delta = rgb[i].delta_green_reg;
+ blue_delta = rgb[i].delta_blue_reg;
+
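+ /* pack each channel as a 10-bit delta in the bits above a 14-bit base value */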
+ red_value = ((red_delta & 0x3ff) << 14) | (red & 0x3fff);
+ green_value = ((green_delta & 0x3ff) << 14) | (green & 0x3fff);
+ blue_value = ((blue_delta & 0x3ff) << 14) | (blue & 0x3fff);
+
+ REG_SET(SHAPER_LUT_DATA[rmu_idx], 0, MPC_RMU_SHAPER_LUT_DATA, red_value);
+ REG_SET(SHAPER_LUT_DATA[rmu_idx], 0, MPC_RMU_SHAPER_LUT_DATA, green_value);
+ REG_SET(SHAPER_LUT_DATA[rmu_idx], 0, MPC_RMU_SHAPER_LUT_DATA, blue_value);
+ }
+
+}
+
+static void mpc3_power_on_shaper_3dlut(
+ struct mpc *mpc,
+ uint32_t rmu_idx,
+ bool power_on)
+{
+ uint32_t power_status_shaper = 2;
+ uint32_t power_status_3dlut = 2;
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
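+ /* MEM_PWR_DIS = 1 keeps the shaper/3DLUT memories powered while they are programmed */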
+ if (rmu_idx == 0) {
+ REG_SET(MPC_RMU_MEM_PWR_CTRL, 0,
+ MPC_RMU0_MEM_PWR_DIS, power_on == true ? 1:0);
+ /*read status is not mandatory, it is just for debugging*/
+ REG_GET(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_SHAPER_MEM_PWR_STATE, &power_status_shaper);
+ REG_GET(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_3DLUT_MEM_PWR_STATE, &power_status_3dlut);
+ } else if (rmu_idx == 1) {
+ REG_SET(MPC_RMU_MEM_PWR_CTRL, 0,
+ MPC_RMU1_MEM_PWR_DIS, power_on == true ? 1:0);
+ REG_GET(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_SHAPER_MEM_PWR_STATE, &power_status_shaper);
+ REG_GET(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_3DLUT_MEM_PWR_STATE, &power_status_3dlut);
+ }
+ /*TODO Add rmu_idx == 2 for SIENNA_CICHLID */
+ if (power_status_shaper != 0 && power_on == true)
+ BREAK_TO_DEBUGGER();
+
+ if (power_status_3dlut != 0 && power_on == true)
+ BREAK_TO_DEBUGGER();
+}
+
+
+
+bool mpc3_program_shaper(
+ struct mpc *mpc,
+ const struct pwl_params *params,
+ uint32_t rmu_idx)
+{
+ enum dc_lut_mode current_mode;
+ enum dc_lut_mode next_mode;
+
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ if (params == NULL) {
+ REG_SET(SHAPER_CONTROL[rmu_idx], 0, MPC_RMU_SHAPER_LUT_MODE, 0);
+ return false;
+ }
+ current_mode = mpc3_get_shaper_current(mpc, rmu_idx);
+
+ if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
+ next_mode = LUT_RAM_B;
+ else
+ next_mode = LUT_RAM_A;
+
+ mpc3_configure_shaper_lut(mpc, next_mode == LUT_RAM_A ? true:false, rmu_idx);
+
+ if (next_mode == LUT_RAM_A)
+ mpc3_program_shaper_luta_settings(mpc, params, rmu_idx);
+ else
+ mpc3_program_shaper_lutb_settings(mpc, params, rmu_idx);
+
+ mpc3_program_shaper_lut(
+ mpc, params->rgb_resulted, params->hw_points_num, rmu_idx);
+
+ REG_SET(SHAPER_CONTROL[rmu_idx], 0, MPC_RMU_SHAPER_LUT_MODE, next_mode == LUT_RAM_A ? 1:2);
+ mpc3_power_on_shaper_3dlut(mpc, rmu_idx, false);
+
+ return true;
+}
+
+static void mpc3_set_3dlut_mode(
+ struct mpc *mpc,
+ enum dc_lut_mode mode,
+ bool is_color_channel_12bits,
+ bool is_lut_size17x17x17,
+ uint32_t rmu_idx)
+{
+ uint32_t lut_mode;
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ if (mode == LUT_BYPASS)
+ lut_mode = 0;
+ else if (mode == LUT_RAM_A)
+ lut_mode = 1;
+ else
+ lut_mode = 2;
+
+ REG_UPDATE_2(RMU_3DLUT_MODE[rmu_idx],
+ MPC_RMU_3DLUT_MODE, lut_mode,
+ MPC_RMU_3DLUT_SIZE, is_lut_size17x17x17 == true ? 0 : 1);
+}
+
+static enum dc_lut_mode get3dlut_config(
+ struct mpc *mpc,
+ bool *is_17x17x17,
+ bool *is_12bits_color_channel,
+ int rmu_idx)
+{
+ uint32_t i_mode, i_enable_10bits, lut_size;
+ enum dc_lut_mode mode;
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ REG_GET(RMU_3DLUT_MODE[rmu_idx],
+ MPC_RMU_3DLUT_MODE_CURRENT, &i_mode);
+
+ REG_GET(RMU_3DLUT_READ_WRITE_CONTROL[rmu_idx],
+ MPC_RMU_3DLUT_30BIT_EN, &i_enable_10bits);
+
+ switch (i_mode) {
+ case 0:
+ mode = LUT_BYPASS;
+ break;
+ case 1:
+ mode = LUT_RAM_A;
+ break;
+ case 2:
+ mode = LUT_RAM_B;
+ break;
+ default:
+ mode = LUT_BYPASS;
+ break;
+ }
+ if (i_enable_10bits > 0)
+ *is_12bits_color_channel = false;
+ else
+ *is_12bits_color_channel = true;
+
+ REG_GET(RMU_3DLUT_MODE[rmu_idx], MPC_RMU_3DLUT_SIZE, &lut_size);
+
+ if (lut_size == 0)
+ *is_17x17x17 = true;
+ else
+ *is_17x17x17 = false;
+
+ return mode;
+}
+
+static void mpc3_select_3dlut_ram(
+ struct mpc *mpc,
+ enum dc_lut_mode mode,
+ bool is_color_channel_12bits,
+ uint32_t rmu_idx)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ REG_UPDATE_2(RMU_3DLUT_READ_WRITE_CONTROL[rmu_idx],
+ MPC_RMU_3DLUT_RAM_SEL, mode == LUT_RAM_A ? 0 : 1,
+ MPC_RMU_3DLUT_30BIT_EN, is_color_channel_12bits == true ? 0:1);
+}
+
+static void mpc3_select_3dlut_ram_mask(
+ struct mpc *mpc,
+ uint32_t ram_selection_mask,
+ uint32_t rmu_idx)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ REG_UPDATE(RMU_3DLUT_READ_WRITE_CONTROL[rmu_idx], MPC_RMU_3DLUT_WRITE_EN_MASK,
+ ram_selection_mask);
+ REG_SET(RMU_3DLUT_INDEX[rmu_idx], 0, MPC_RMU_3DLUT_INDEX, 0);
+}
+
+static void mpc3_set3dlut_ram12(
+ struct mpc *mpc,
+ const struct dc_rgb *lut,
+ uint32_t entries,
+ uint32_t rmu_idx)
+{
+ uint32_t i, red, green, blue, red1, green1, blue1;
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
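+ /* values are shifted left by 4, and two LUT entries are written per register
+ * access, one color component at a time
+ */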
+ for (i = 0 ; i < entries; i += 2) {
+ red = lut[i].red<<4;
+ green = lut[i].green<<4;
+ blue = lut[i].blue<<4;
+ red1 = lut[i+1].red<<4;
+ green1 = lut[i+1].green<<4;
+ blue1 = lut[i+1].blue<<4;
+
+ REG_SET_2(RMU_3DLUT_DATA[rmu_idx], 0,
+ MPC_RMU_3DLUT_DATA0, red,
+ MPC_RMU_3DLUT_DATA1, red1);
+
+ REG_SET_2(RMU_3DLUT_DATA[rmu_idx], 0,
+ MPC_RMU_3DLUT_DATA0, green,
+ MPC_RMU_3DLUT_DATA1, green1);
+
+ REG_SET_2(RMU_3DLUT_DATA[rmu_idx], 0,
+ MPC_RMU_3DLUT_DATA0, blue,
+ MPC_RMU_3DLUT_DATA1, blue1);
+ }
+}
+
+static void mpc3_set3dlut_ram10(
+ struct mpc *mpc,
+ const struct dc_rgb *lut,
+ uint32_t entries,
+ uint32_t rmu_idx)
+{
+ uint32_t i, red, green, blue, value;
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
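+ /* pack the three 10-bit components into a single 30-bit word per LUT entry */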
+ for (i = 0; i < entries; i++) {
+ red = lut[i].red;
+ green = lut[i].green;
+ blue = lut[i].blue;
+ //should we shift red by 22 bits and green by 12? ask Nvenko
+ value = (red<<20) | (green<<10) | blue;
+
+ REG_SET(RMU_3DLUT_DATA_30BIT[rmu_idx], 0, MPC_RMU_3DLUT_DATA_30BIT, value);
+ }
+
+}
+
+
+static void mpc3_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
+{
+ mpcc->mpcc_id = mpcc_inst;
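+ /* 0xf means the MPCC is not connected to any DPP */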
+ mpcc->dpp_id = 0xf;
+ mpcc->mpcc_bot = NULL;
+ mpcc->blnd_cfg.overlap_only = false;
+ mpcc->blnd_cfg.global_alpha = 0xff;
+ mpcc->blnd_cfg.global_gain = 0xff;
+ mpcc->blnd_cfg.background_color_bpc = 4;
+ mpcc->blnd_cfg.bottom_gain_mode = 0;
+ mpcc->blnd_cfg.top_gain = 0x1f000;
+ mpcc->blnd_cfg.bottom_inside_gain = 0x1f000;
+ mpcc->blnd_cfg.bottom_outside_gain = 0x1f000;
+ mpcc->sm_cfg.enable = false;
+ mpcc->shared_bottom = false;
+}
+
+static void program_gamut_remap(
+ struct dcn30_mpc *mpc30,
+ int mpcc_id,
+ const uint16_t *regval,
+ int select)
+{
+ uint16_t selection = 0;
+ struct color_matrices_reg gam_regs;
+
+ if (regval == NULL || select == GAMUT_REMAP_BYPASS) {
+ REG_SET(MPCC_GAMUT_REMAP_MODE[mpcc_id], 0,
+ MPCC_GAMUT_REMAP_MODE, GAMUT_REMAP_BYPASS);
+ return;
+ }
+ switch (select) {
+ case GAMUT_REMAP_COEFF:
+ selection = 1;
+ break;
+ /* this corresponds to GAMUT_REMAP coefficient set B;
+ * we don't have common coefficient sets in dcn3ag/dcn3
+ */
+ case GAMUT_REMAP_COMA_COEFF:
+ selection = 2;
+ break;
+ default:
+ break;
+ }
+
+ gam_regs.shifts.csc_c11 = mpc30->mpc_shift->MPCC_GAMUT_REMAP_C11_A;
+ gam_regs.masks.csc_c11 = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C11_A;
+ gam_regs.shifts.csc_c12 = mpc30->mpc_shift->MPCC_GAMUT_REMAP_C12_A;
+ gam_regs.masks.csc_c12 = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C12_A;
+
+
+ if (select == GAMUT_REMAP_COEFF) {
+ gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_A[mpcc_id]);
+ gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_A[mpcc_id]);
+
+ cm_helper_program_color_matrices(
+ mpc30->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else if (select == GAMUT_REMAP_COMA_COEFF) {
+
+ gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_B[mpcc_id]);
+ gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_B[mpcc_id]);
+
+ cm_helper_program_color_matrices(
+ mpc30->base.ctx,
+ regval,
+ &gam_regs);
+
+ }
+ //select coefficient set to use
+ REG_SET(MPCC_GAMUT_REMAP_MODE[mpcc_id], 0,
+ MPCC_GAMUT_REMAP_MODE, selection);
+}
+
+void mpc3_set_gamut_remap(
+ struct mpc *mpc,
+ int mpcc_id,
+ const struct mpc_grph_gamut_adjustment *adjust)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+ int i = 0;
+ int gamut_mode;
+
+ if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
+ program_gamut_remap(mpc30, mpcc_id, NULL, GAMUT_REMAP_BYPASS);
+ else {
+ struct fixed31_32 arr_matrix[12];
+ uint16_t arr_reg_val[12];
+
+ for (i = 0; i < 12; i++)
+ arr_matrix[i] = adjust->temperature_matrix[i];
+
+ convert_float_matrix(
+ arr_reg_val, arr_matrix, 12);
+
+ //current coefficient set in use
+ REG_GET(MPCC_GAMUT_REMAP_MODE[mpcc_id], MPCC_GAMUT_REMAP_MODE_CURRENT, &gamut_mode);
+
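+ /* program the coefficient set that is not currently in use, then switch to it */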
+ if (gamut_mode == 0)
+ gamut_mode = 1; //use coefficient set A
+ else if (gamut_mode == 1)
+ gamut_mode = 2;
+ else
+ gamut_mode = 1;
+
+ program_gamut_remap(mpc30, mpcc_id, arr_reg_val, gamut_mode);
+ }
+}
+
+bool mpc3_program_3dlut(
+ struct mpc *mpc,
+ const struct tetrahedral_params *params,
+ int rmu_idx)
+{
+ enum dc_lut_mode mode;
+ bool is_17x17x17;
+ bool is_12bits_color_channel;
+ const struct dc_rgb *lut0;
+ const struct dc_rgb *lut1;
+ const struct dc_rgb *lut2;
+ const struct dc_rgb *lut3;
+ int lut_size0;
+ int lut_size;
+
+ if (params == NULL) {
+ mpc3_set_3dlut_mode(mpc, LUT_BYPASS, false, false, rmu_idx);
+ return false;
+ }
+ mpc3_power_on_shaper_3dlut(mpc, rmu_idx, true);
+
+ mode = get3dlut_config(mpc, &is_17x17x17, &is_12bits_color_channel, rmu_idx);
+
+ if (mode == LUT_BYPASS || mode == LUT_RAM_B)
+ mode = LUT_RAM_A;
+ else
+ mode = LUT_RAM_B;
+
+ is_17x17x17 = !params->use_tetrahedral_9;
+ is_12bits_color_channel = params->use_12bits;
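+ /* lut0 may hold a different number of entries than lut1-3, so its size is computed separately */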
+ if (is_17x17x17) {
+ lut0 = params->tetrahedral_17.lut0;
+ lut1 = params->tetrahedral_17.lut1;
+ lut2 = params->tetrahedral_17.lut2;
+ lut3 = params->tetrahedral_17.lut3;
+ lut_size0 = sizeof(params->tetrahedral_17.lut0)/
+ sizeof(params->tetrahedral_17.lut0[0]);
+ lut_size = sizeof(params->tetrahedral_17.lut1)/
+ sizeof(params->tetrahedral_17.lut1[0]);
+ } else {
+ lut0 = params->tetrahedral_9.lut0;
+ lut1 = params->tetrahedral_9.lut1;
+ lut2 = params->tetrahedral_9.lut2;
+ lut3 = params->tetrahedral_9.lut3;
+ lut_size0 = sizeof(params->tetrahedral_9.lut0)/
+ sizeof(params->tetrahedral_9.lut0[0]);
+ lut_size = sizeof(params->tetrahedral_9.lut1)/
+ sizeof(params->tetrahedral_9.lut1[0]);
+ }
+
+ mpc3_select_3dlut_ram(mpc, mode,
+ is_12bits_color_channel, rmu_idx);
+ mpc3_select_3dlut_ram_mask(mpc, 0x1, rmu_idx);
+ if (is_12bits_color_channel)
+ mpc3_set3dlut_ram12(mpc, lut0, lut_size0, rmu_idx);
+ else
+ mpc3_set3dlut_ram10(mpc, lut0, lut_size0, rmu_idx);
+
+ mpc3_select_3dlut_ram_mask(mpc, 0x2, rmu_idx);
+ if (is_12bits_color_channel)
+ mpc3_set3dlut_ram12(mpc, lut1, lut_size, rmu_idx);
+ else
+ mpc3_set3dlut_ram10(mpc, lut1, lut_size, rmu_idx);
+
+ mpc3_select_3dlut_ram_mask(mpc, 0x4, rmu_idx);
+ if (is_12bits_color_channel)
+ mpc3_set3dlut_ram12(mpc, lut2, lut_size, rmu_idx);
+ else
+ mpc3_set3dlut_ram10(mpc, lut2, lut_size, rmu_idx);
+
+ mpc3_select_3dlut_ram_mask(mpc, 0x8, rmu_idx);
+ if (is_12bits_color_channel)
+ mpc3_set3dlut_ram12(mpc, lut3, lut_size, rmu_idx);
+ else
+ mpc3_set3dlut_ram10(mpc, lut3, lut_size, rmu_idx);
+
+ mpc3_set_3dlut_mode(mpc, mode, is_12bits_color_channel,
+ is_17x17x17, rmu_idx);
+
+ return true;
+}
+
+void mpc3_set_output_csc(
+ struct mpc *mpc,
+ int opp_id,
+ const uint16_t *regval,
+ enum mpc_output_csc_mode ocsc_mode)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+ struct color_matrices_reg ocsc_regs;
+
+ REG_WRITE(MPC_OUT_CSC_COEF_FORMAT, 0);
+
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
+
+ if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE)
+ return;
+
+ if (regval == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ ocsc_regs.shifts.csc_c11 = mpc30->mpc_shift->MPC_OCSC_C11_A;
+ ocsc_regs.masks.csc_c11 = mpc30->mpc_mask->MPC_OCSC_C11_A;
+ ocsc_regs.shifts.csc_c12 = mpc30->mpc_shift->MPC_OCSC_C12_A;
+ ocsc_regs.masks.csc_c12 = mpc30->mpc_mask->MPC_OCSC_C12_A;
+
+ if (ocsc_mode == MPC_OUTPUT_CSC_COEF_A) {
+ ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_A[opp_id]);
+ ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_A[opp_id]);
+ } else {
+ ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]);
+ ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]);
+ }
+ cm_helper_program_color_matrices(
+ mpc30->base.ctx,
+ regval,
+ &ocsc_regs);
+}
+
+void mpc3_set_ocsc_default(
+ struct mpc *mpc,
+ int opp_id,
+ enum dc_color_space color_space,
+ enum mpc_output_csc_mode ocsc_mode)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+ uint32_t arr_size;
+ struct color_matrices_reg ocsc_regs;
+ const uint16_t *regval = NULL;
+
+ REG_WRITE(MPC_OUT_CSC_COEF_FORMAT, 0);
+
+ REG_SET(CSC_MODE[opp_id], 0, MPC_OCSC_MODE, ocsc_mode);
+ if (ocsc_mode == MPC_OUTPUT_CSC_DISABLE)
+ return;
+
+ regval = find_color_matrix(color_space, &arr_size);
+
+ if (regval == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ ocsc_regs.shifts.csc_c11 = mpc30->mpc_shift->MPC_OCSC_C11_A;
+ ocsc_regs.masks.csc_c11 = mpc30->mpc_mask->MPC_OCSC_C11_A;
+ ocsc_regs.shifts.csc_c12 = mpc30->mpc_shift->MPC_OCSC_C12_A;
+ ocsc_regs.masks.csc_c12 = mpc30->mpc_mask->MPC_OCSC_C12_A;
+
+
+ if (ocsc_mode == MPC_OUTPUT_CSC_COEF_A) {
+ ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_A[opp_id]);
+ ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_A[opp_id]);
+ } else {
+ ocsc_regs.csc_c11_c12 = REG(CSC_C11_C12_B[opp_id]);
+ ocsc_regs.csc_c33_c34 = REG(CSC_C33_C34_B[opp_id]);
+ }
+
+ cm_helper_program_color_matrices(
+ mpc30->base.ctx,
+ regval,
+ &ocsc_regs);
+}
+
+void mpc3_set_rmu_mux(
+ struct mpc *mpc,
+ int rmu_idx,
+ int value)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ if (rmu_idx == 0)
+ REG_UPDATE(MPC_RMU_CONTROL, MPC_RMU0_MUX, value);
+ else if (rmu_idx == 1)
+ REG_UPDATE(MPC_RMU_CONTROL, MPC_RMU1_MUX, value);
+
+}
+
+uint32_t mpc3_get_rmu_mux_status(
+ struct mpc *mpc,
+ int rmu_idx)
+{
+ uint32_t status = 0xf;
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ if (rmu_idx == 0)
+ REG_GET(MPC_RMU_CONTROL, MPC_RMU0_MUX_STATUS, &status);
+ else if (rmu_idx == 1)
+ REG_GET(MPC_RMU_CONTROL, MPC_RMU1_MUX_STATUS, &status);
+
+ return status;
+}
+
+uint32_t mpcc3_acquire_rmu(struct mpc *mpc, int mpcc_id, int rmu_idx)
+{
+ uint32_t rmu_status;
+
+ //determine if this mpcc is already multiplexed to an RMU unit
+ rmu_status = mpc3_get_rmu_mux_status(mpc, rmu_idx);
+ if (rmu_status == mpcc_id)
+ //return rmu_idx of the pre-acquired RMU unit
+ return rmu_idx;
+
+ if (rmu_status == 0xf) {//rmu unit is disabled
+ mpc3_set_rmu_mux(mpc, rmu_idx, mpcc_id);
+ return rmu_idx;
+ }
+
+ //no vacant RMU units, or invalid parameters passed to acquire_post_bldn_3dlut
+ return -1;
+}
+
+int mpcc3_release_rmu(struct mpc *mpc, int mpcc_id)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+ int rmu_idx;
+ uint32_t rmu_status;
+ int released_rmu = -1;
+
+ for (rmu_idx = 0; rmu_idx < mpc30->num_rmu; rmu_idx++) {
+ rmu_status = mpc3_get_rmu_mux_status(mpc, rmu_idx);
+ if (rmu_status == mpcc_id) {
+ mpc3_set_rmu_mux(mpc, rmu_idx, 0xf);
+ released_rmu = rmu_idx;
+ break;
+ }
+ }
+ return released_rmu;
+
+}
+
+const struct mpc_funcs dcn30_mpc_funcs = {
+ .read_mpcc_state = mpc1_read_mpcc_state,
+ .insert_plane = mpc1_insert_plane,
+ .remove_mpcc = mpc1_remove_mpcc,
+ .mpc_init = mpc1_mpc_init,
+ .mpc_init_single_inst = mpc1_mpc_init_single_inst,
+ .update_blending = mpc2_update_blending,
+ .cursor_lock = mpc1_cursor_lock,
+ .get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
+ .wait_for_idle = mpc2_assert_idle_mpcc,
+ .assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect,
+ .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
+ .set_denorm = mpc3_set_denorm,
+ .set_denorm_clamp = mpc3_set_denorm_clamp,
+ .set_output_csc = mpc3_set_output_csc,
+ .set_ocsc_default = mpc3_set_ocsc_default,
+ .set_output_gamma = mpc3_set_output_gamma,
+ .insert_plane_to_secondary = NULL,
+ .remove_mpcc_from_secondary = NULL,
+ .set_dwb_mux = mpc3_set_dwb_mux,
+ .disable_dwb_mux = mpc3_disable_dwb_mux,
+ .is_dwb_idle = mpc3_is_dwb_idle,
+ .set_out_rate_control = mpc3_set_out_rate_control,
+ .set_gamut_remap = mpc3_set_gamut_remap,
+ .program_shaper = mpc3_program_shaper,
+ .acquire_rmu = mpcc3_acquire_rmu,
+ .program_3dlut = mpc3_program_3dlut,
+ .release_rmu = mpcc3_release_rmu,
+ .power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
+
+};
+
+void dcn30_mpc_construct(struct dcn30_mpc *mpc30,
+ struct dc_context *ctx,
+ const struct dcn30_mpc_registers *mpc_regs,
+ const struct dcn30_mpc_shift *mpc_shift,
+ const struct dcn30_mpc_mask *mpc_mask,
+ int num_mpcc,
+ int num_rmu)
+{
+ int i;
+
+ mpc30->base.ctx = ctx;
+
+ mpc30->base.funcs = &dcn30_mpc_funcs;
+
+ mpc30->mpc_regs = mpc_regs;
+ mpc30->mpc_shift = mpc_shift;
+ mpc30->mpc_mask = mpc_mask;
+
+ mpc30->mpcc_in_use_mask = 0;
+ mpc30->num_mpcc = num_mpcc;
+ mpc30->num_rmu = num_rmu;
+
+ for (i = 0; i < MAX_MPCC; i++)
+ mpc3_init_mpcc(&mpc30->base.mpcc_array[i], i);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
new file mode 100644
index 000000000000..dfd3b9713df6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
@@ -0,0 +1,665 @@
+/* Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_MPCC_DCN30_H__
+#define __DC_MPCC_DCN30_H__
+
+#include "dcn20/dcn20_mpc.h"
+
+#define MAX_RMU 3
+
+#define TO_DCN30_MPC(mpc_base) \
+ container_of(mpc_base, struct dcn30_mpc, base)
+
+#ifdef SRII_MPC_RMU
+#undef SRII_MPC_RMU
+
+#define SRII_MPC_RMU(reg_name, block, id)\
+ .RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#endif
+
+
+#define MPC_REG_LIST_DCN3_0(inst)\
+ MPC_COMMON_REG_LIST_DCN1_0(inst),\
+ SRII(MPCC_TOP_GAIN, MPCC, inst),\
+ SRII(MPCC_BOT_GAIN_INSIDE, MPCC, inst),\
+ SRII(MPCC_BOT_GAIN_OUTSIDE, MPCC, inst),\
+ SRII(MPCC_MEM_PWR_CTRL, MPCC, inst),\
+ SRII(MPCC_OGAM_LUT_INDEX, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_LUT_DATA, MPCC_OGAM, inst), \
+ SRII(MPCC_GAMUT_REMAP_COEF_FORMAT, MPCC_OGAM, inst),\
+ SRII(MPCC_GAMUT_REMAP_MODE, MPCC_OGAM, inst),\
+ SRII(MPC_GAMUT_REMAP_C11_C12_A, MPCC_OGAM, inst),\
+ SRII(MPC_GAMUT_REMAP_C33_C34_A, MPCC_OGAM, inst),\
+ SRII(MPC_GAMUT_REMAP_C11_C12_B, MPCC_OGAM, inst),\
+ SRII(MPC_GAMUT_REMAP_C33_C34_B, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_START_CNTL_G, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_START_CNTL_R, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_START_SLOPE_CNTL_B, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_START_SLOPE_CNTL_G, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_START_SLOPE_CNTL_R, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_END_CNTL1_B, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_END_CNTL1_G, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_END_CNTL2_G, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_END_CNTL1_R, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_END_CNTL2_R, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_REGION_32_33, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_OFFSET_B, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_OFFSET_G, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_OFFSET_R, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_START_BASE_CNTL_B, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_START_BASE_CNTL_G, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMA_START_BASE_CNTL_R, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_START_CNTL_B, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_START_CNTL_G, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_START_CNTL_R, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_START_SLOPE_CNTL_B, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_START_SLOPE_CNTL_G, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_START_SLOPE_CNTL_R, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_END_CNTL1_B, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_END_CNTL2_B, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_END_CNTL1_G, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_END_CNTL2_G, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_END_CNTL1_R, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_END_CNTL2_R, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_REGION_0_1, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_REGION_32_33, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_OFFSET_B, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_OFFSET_G, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_OFFSET_R, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_B, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_G, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_R, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_CONTROL, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_LUT_CONTROL, MPCC_OGAM, inst)
+
+/*
+ SRII(MPCC_OGAM_LUT_RAM_CONTROL, MPCC_OGAM, inst),\
+ SRII(MPCC_OGAM_MODE, MPCC_OGAM, inst)
+*/
+
+#define MPC_OUT_MUX_REG_LIST_DCN3_0(inst) \
+ MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst),\
+ SRII(CSC_MODE, MPC_OUT, inst),\
+ SRII(CSC_C11_C12_A, MPC_OUT, inst),\
+ SRII(CSC_C33_C34_A, MPC_OUT, inst),\
+ SRII(CSC_C11_C12_B, MPC_OUT, inst),\
+ SRII(CSC_C33_C34_B, MPC_OUT, inst),\
+ SRII(DENORM_CONTROL, MPC_OUT, inst),\
+ SRII(DENORM_CLAMP_G_Y, MPC_OUT, inst),\
+ SRII(DENORM_CLAMP_B_CB, MPC_OUT, inst), \
+ SR(MPC_OUT_CSC_COEF_FORMAT)
+
+#define MPC_RMU_GLOBAL_REG_LIST_DCN3AG \
+ SR(MPC_RMU_CONTROL),\
+ SR(MPC_RMU_MEM_PWR_CTRL)
+
+#define MPC_RMU_REG_LIST_DCN3AG(inst) \
+ SRII(SHAPER_CONTROL, MPC_RMU, inst),\
+ SRII(SHAPER_OFFSET_R, MPC_RMU, inst),\
+ SRII(SHAPER_OFFSET_G, MPC_RMU, inst),\
+ SRII(SHAPER_OFFSET_B, MPC_RMU, inst),\
+ SRII(SHAPER_SCALE_R, MPC_RMU, inst),\
+ SRII(SHAPER_SCALE_G_B, MPC_RMU, inst),\
+ SRII(SHAPER_LUT_INDEX, MPC_RMU, inst),\
+ SRII(SHAPER_LUT_DATA, MPC_RMU, inst),\
+ SRII(SHAPER_LUT_WRITE_EN_MASK, MPC_RMU, inst),\
+ SRII(SHAPER_RAMA_START_CNTL_B, MPC_RMU, inst),\
+ SRII(SHAPER_RAMA_START_CNTL_G, MPC_RMU, inst),\
+ SRII(SHAPER_RAMA_START_CNTL_R, MPC_RMU, inst),\
+ SRII(SHAPER_RAMA_END_CNTL_B, MPC_RMU, inst),\
+ SRII(SHAPER_RAMA_END_CNTL_G, MPC_RMU, inst),\
+ SRII(SHAPER_RAMA_END_CNTL_R, MPC_RMU, inst),\
+ SRII(SHAPER_RAMA_REGION_0_1, MPC_RMU, inst),\
+ SRII(SHAPER_RAMA_REGION_2_3, MPC_RMU, inst),\
+ SRII(SHAPER_RAMA_REGION_4_5, MPC_RMU, inst),\
+ SRII(SHAPER_RAMA_REGION_6_7, MPC_RMU, inst),\
+ SRII(SHAPER_RAMA_REGION_8_9, MPC_RMU, inst),\
+ SRII(SHAPER_RAMA_REGION_10_11, MPC_RMU, inst),\
+ SRII(SHAPER_RAMA_REGION_12_13, MPC_RMU, inst),\
+ SRII(SHAPER_RAMA_REGION_14_15, MPC_RMU, inst),\
+ SRII(SHAPER_RAMA_REGION_16_17, MPC_RMU, inst),\
+ SRII(SHAPER_RAMA_REGION_18_19, MPC_RMU, inst),\
+ SRII(SHAPER_RAMA_REGION_20_21, MPC_RMU, inst),\
+ SRII(SHAPER_RAMA_REGION_22_23, MPC_RMU, inst),\
+ SRII(SHAPER_RAMA_REGION_24_25, MPC_RMU, inst),\
+ SRII(SHAPER_RAMA_REGION_26_27, MPC_RMU, inst),\
+ SRII(SHAPER_RAMA_REGION_28_29, MPC_RMU, inst),\
+ SRII(SHAPER_RAMA_REGION_30_31, MPC_RMU, inst),\
+ SRII(SHAPER_RAMA_REGION_32_33, MPC_RMU, inst),\
+ SRII(SHAPER_RAMB_START_CNTL_B, MPC_RMU, inst),\
+ SRII(SHAPER_RAMB_START_CNTL_G, MPC_RMU, inst),\
+ SRII(SHAPER_RAMB_START_CNTL_R, MPC_RMU, inst),\
+ SRII(SHAPER_RAMB_END_CNTL_B, MPC_RMU, inst),\
+ SRII(SHAPER_RAMB_END_CNTL_G, MPC_RMU, inst),\
+ SRII(SHAPER_RAMB_END_CNTL_R, MPC_RMU, inst),\
+ SRII(SHAPER_RAMB_REGION_0_1, MPC_RMU, inst),\
+ SRII(SHAPER_RAMB_REGION_2_3, MPC_RMU, inst),\
+ SRII(SHAPER_RAMB_REGION_4_5, MPC_RMU, inst),\
+ SRII(SHAPER_RAMB_REGION_6_7, MPC_RMU, inst),\
+ SRII(SHAPER_RAMB_REGION_8_9, MPC_RMU, inst),\
+ SRII(SHAPER_RAMB_REGION_10_11, MPC_RMU, inst),\
+ SRII(SHAPER_RAMB_REGION_12_13, MPC_RMU, inst),\
+ SRII(SHAPER_RAMB_REGION_14_15, MPC_RMU, inst),\
+ SRII(SHAPER_RAMB_REGION_16_17, MPC_RMU, inst),\
+ SRII(SHAPER_RAMB_REGION_18_19, MPC_RMU, inst),\
+ SRII(SHAPER_RAMB_REGION_20_21, MPC_RMU, inst),\
+ SRII(SHAPER_RAMB_REGION_22_23, MPC_RMU, inst),\
+ SRII(SHAPER_RAMB_REGION_24_25, MPC_RMU, inst),\
+ SRII(SHAPER_RAMB_REGION_26_27, MPC_RMU, inst),\
+ SRII(SHAPER_RAMB_REGION_28_29, MPC_RMU, inst),\
+ SRII(SHAPER_RAMB_REGION_30_31, MPC_RMU, inst),\
+ SRII(SHAPER_RAMB_REGION_32_33, MPC_RMU, inst),\
+ SRII_MPC_RMU(3DLUT_MODE, MPC_RMU, inst),\
+ SRII_MPC_RMU(3DLUT_INDEX, MPC_RMU, inst),\
+ SRII_MPC_RMU(3DLUT_DATA, MPC_RMU, inst),\
+ SRII_MPC_RMU(3DLUT_DATA_30BIT, MPC_RMU, inst),\
+ SRII_MPC_RMU(3DLUT_READ_WRITE_CONTROL, MPC_RMU, inst),\
+ SRII_MPC_RMU(3DLUT_OUT_NORM_FACTOR, MPC_RMU, inst),\
+ SRII_MPC_RMU(3DLUT_OUT_OFFSET_R, MPC_RMU, inst),\
+ SRII_MPC_RMU(3DLUT_OUT_OFFSET_G, MPC_RMU, inst),\
+ SRII_MPC_RMU(3DLUT_OUT_OFFSET_B, MPC_RMU, inst)
+
+
+#define MPC_DWB_MUX_REG_LIST_DCN3_0(inst) \
+ SRII_DWB(DWB_MUX, MUX, MPC_DWB, inst)
+
+#define MPC_REG_VARIABLE_LIST_DCN3_0 \
+ MPC_REG_VARIABLE_LIST_DCN2_0 \
+ uint32_t DWB_MUX[MAX_DWB]; \
+ uint32_t MPCC_GAMUT_REMAP_COEF_FORMAT[MAX_MPCC]; \
+ uint32_t MPCC_GAMUT_REMAP_MODE[MAX_MPCC]; \
+ uint32_t MPC_GAMUT_REMAP_C11_C12_A[MAX_MPCC]; \
+ uint32_t MPC_GAMUT_REMAP_C33_C34_A[MAX_MPCC]; \
+ uint32_t MPC_GAMUT_REMAP_C11_C12_B[MAX_MPCC]; \
+ uint32_t MPC_GAMUT_REMAP_C33_C34_B[MAX_MPCC]; \
+ uint32_t MPC_RMU_CONTROL; \
+ uint32_t MPC_RMU_MEM_PWR_CTRL; \
+ uint32_t SHAPER_CONTROL[MAX_RMU]; \
+ uint32_t SHAPER_OFFSET_R[MAX_RMU]; \
+ uint32_t SHAPER_OFFSET_G[MAX_RMU]; \
+ uint32_t SHAPER_OFFSET_B[MAX_RMU]; \
+ uint32_t SHAPER_SCALE_R[MAX_RMU]; \
+ uint32_t SHAPER_SCALE_G_B[MAX_RMU]; \
+ uint32_t SHAPER_LUT_INDEX[MAX_RMU]; \
+ uint32_t SHAPER_LUT_DATA[MAX_RMU]; \
+ uint32_t SHAPER_LUT_WRITE_EN_MASK[MAX_RMU]; \
+ uint32_t SHAPER_RAMA_START_CNTL_B[MAX_RMU]; \
+ uint32_t SHAPER_RAMA_START_CNTL_G[MAX_RMU]; \
+ uint32_t SHAPER_RAMA_START_CNTL_R[MAX_RMU]; \
+ uint32_t SHAPER_RAMA_END_CNTL_B[MAX_RMU]; \
+ uint32_t SHAPER_RAMA_END_CNTL_G[MAX_RMU]; \
+ uint32_t SHAPER_RAMA_END_CNTL_R[MAX_RMU]; \
+ uint32_t SHAPER_RAMA_REGION_0_1[MAX_RMU]; \
+ uint32_t SHAPER_RAMA_REGION_2_3[MAX_RMU]; \
+ uint32_t SHAPER_RAMA_REGION_4_5[MAX_RMU]; \
+ uint32_t SHAPER_RAMA_REGION_6_7[MAX_RMU]; \
+ uint32_t SHAPER_RAMA_REGION_8_9[MAX_RMU]; \
+ uint32_t SHAPER_RAMA_REGION_10_11[MAX_RMU]; \
+ uint32_t SHAPER_RAMA_REGION_12_13[MAX_RMU]; \
+ uint32_t SHAPER_RAMA_REGION_14_15[MAX_RMU]; \
+ uint32_t SHAPER_RAMA_REGION_16_17[MAX_RMU]; \
+ uint32_t SHAPER_RAMA_REGION_18_19[MAX_RMU]; \
+ uint32_t SHAPER_RAMA_REGION_20_21[MAX_RMU]; \
+ uint32_t SHAPER_RAMA_REGION_22_23[MAX_RMU]; \
+ uint32_t SHAPER_RAMA_REGION_24_25[MAX_RMU]; \
+ uint32_t SHAPER_RAMA_REGION_26_27[MAX_RMU]; \
+ uint32_t SHAPER_RAMA_REGION_28_29[MAX_RMU]; \
+ uint32_t SHAPER_RAMA_REGION_30_31[MAX_RMU]; \
+ uint32_t SHAPER_RAMA_REGION_32_33[MAX_RMU]; \
+ uint32_t MPCC_OGAM_RAMA_START_SLOPE_CNTL_B[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMA_START_SLOPE_CNTL_G[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMA_START_SLOPE_CNTL_R[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMA_OFFSET_B[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMA_OFFSET_G[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMA_OFFSET_R[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMA_START_BASE_CNTL_B[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMA_START_BASE_CNTL_G[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMA_START_BASE_CNTL_R[MAX_MPCC];\
+ uint32_t SHAPER_RAMB_START_CNTL_B[MAX_RMU]; \
+ uint32_t SHAPER_RAMB_START_CNTL_G[MAX_RMU]; \
+ uint32_t SHAPER_RAMB_START_CNTL_R[MAX_RMU]; \
+ uint32_t SHAPER_RAMB_END_CNTL_B[MAX_RMU]; \
+ uint32_t SHAPER_RAMB_END_CNTL_G[MAX_RMU]; \
+ uint32_t SHAPER_RAMB_END_CNTL_R[MAX_RMU]; \
+ uint32_t SHAPER_RAMB_REGION_0_1[MAX_RMU]; \
+ uint32_t SHAPER_RAMB_REGION_2_3[MAX_RMU]; \
+ uint32_t SHAPER_RAMB_REGION_4_5[MAX_RMU]; \
+ uint32_t SHAPER_RAMB_REGION_6_7[MAX_RMU]; \
+ uint32_t SHAPER_RAMB_REGION_8_9[MAX_RMU]; \
+ uint32_t SHAPER_RAMB_REGION_10_11[MAX_RMU]; \
+ uint32_t SHAPER_RAMB_REGION_12_13[MAX_RMU]; \
+ uint32_t SHAPER_RAMB_REGION_14_15[MAX_RMU]; \
+ uint32_t SHAPER_RAMB_REGION_16_17[MAX_RMU]; \
+ uint32_t SHAPER_RAMB_REGION_18_19[MAX_RMU]; \
+ uint32_t SHAPER_RAMB_REGION_20_21[MAX_RMU]; \
+ uint32_t SHAPER_RAMB_REGION_22_23[MAX_RMU]; \
+ uint32_t SHAPER_RAMB_REGION_24_25[MAX_RMU]; \
+ uint32_t SHAPER_RAMB_REGION_26_27[MAX_RMU]; \
+ uint32_t SHAPER_RAMB_REGION_28_29[MAX_RMU]; \
+ uint32_t SHAPER_RAMB_REGION_30_31[MAX_RMU]; \
+ uint32_t SHAPER_RAMB_REGION_32_33[MAX_RMU]; \
+ uint32_t RMU_3DLUT_MODE[MAX_RMU]; \
+ uint32_t RMU_3DLUT_INDEX[MAX_RMU]; \
+ uint32_t RMU_3DLUT_DATA[MAX_RMU]; \
+ uint32_t RMU_3DLUT_DATA_30BIT[MAX_RMU]; \
+ uint32_t RMU_3DLUT_READ_WRITE_CONTROL[MAX_RMU]; \
+ uint32_t RMU_3DLUT_OUT_NORM_FACTOR[MAX_RMU]; \
+ uint32_t RMU_3DLUT_OUT_OFFSET_R[MAX_RMU]; \
+ uint32_t RMU_3DLUT_OUT_OFFSET_G[MAX_RMU]; \
+ uint32_t RMU_3DLUT_OUT_OFFSET_B[MAX_RMU]; \
+ uint32_t MPCC_OGAM_RAMB_START_SLOPE_CNTL_B[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMB_START_SLOPE_CNTL_G[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMB_START_SLOPE_CNTL_R[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_CONTROL[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_LUT_CONTROL[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMB_OFFSET_B[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMB_OFFSET_G[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMB_OFFSET_R[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMB_START_BASE_CNTL_B[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMB_START_BASE_CNTL_G[MAX_MPCC]; \
+ uint32_t MPCC_OGAM_RAMB_START_BASE_CNTL_R[MAX_MPCC]; \
+ uint32_t MPC_OUT_CSC_COEF_FORMAT
+
+#define MPC_COMMON_MASK_SH_LIST_DCN3_0(mask_sh) \
+ MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh),\
+ SF(MPCC0_MPCC_CONTROL, MPCC_BG_BPC, mask_sh),\
+ SF(MPCC0_MPCC_CONTROL, MPCC_BOT_GAIN_MODE, mask_sh),\
+ SF(MPCC0_MPCC_TOP_GAIN, MPCC_TOP_GAIN, mask_sh),\
+ SF(MPCC0_MPCC_BOT_GAIN_INSIDE, MPCC_BOT_GAIN_INSIDE, mask_sh),\
+ SF(MPCC0_MPCC_BOT_GAIN_OUTSIDE, MPCC_BOT_GAIN_OUTSIDE, mask_sh),\
+ SF(MPC_OUT0_CSC_MODE, MPC_OCSC_MODE, mask_sh),\
+ SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C11_A, mask_sh),\
+ SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C12_A, mask_sh),\
+ SF(MPCC0_MPCC_STATUS, MPCC_DISABLED, mask_sh),\
+ SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_FORCE, mask_sh),\
+ SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_DIS, mask_sh),\
+ SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_MODE, mask_sh),\
+ SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MAX_R_CR, mask_sh),\
+ SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MIN_R_CR, mask_sh),\
+ SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\
+ SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\
+ SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\
+ SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE, MPCC_GAMUT_REMAP_MODE, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE, MPCC_GAMUT_REMAP_MODE_CURRENT, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_COEF_FORMAT, MPCC_GAMUT_REMAP_COEF_FORMAT, mask_sh),\
+ SF(MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A, MPCC_GAMUT_REMAP_C11_A, mask_sh),\
+ SF(MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A, MPCC_GAMUT_REMAP_C12_A, mask_sh),\
+ SF(MPC_DWB0_MUX, MPC_DWB0_MUX, mask_sh),\
+ SF(MPC_DWB0_MUX, MPC_DWB0_MUX_STATUS, mask_sh),\
+ SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL, mask_sh),\
+ SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL_DISABLE, mask_sh),\
+ SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_MODE, mask_sh),\
+ SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_COUNT, mask_sh), \
+ SF(MPC_RMU_CONTROL, MPC_RMU0_MUX, mask_sh), \
+ SF(MPC_RMU_CONTROL, MPC_RMU1_MUX, mask_sh), \
+ SF(MPC_RMU_CONTROL, MPC_RMU0_MUX_STATUS, mask_sh), \
+ SF(MPC_RMU_CONTROL, MPC_RMU1_MUX_STATUS, mask_sh), \
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B, MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_B, MPCC_OGAM_RAMA_OFFSET_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_G, MPCC_OGAM_RAMA_OFFSET_G, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_R, MPCC_OGAM_RAMA_OFFSET_R, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_LUT_INDEX, MPCC_OGAM_LUT_INDEX, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_MODE, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_SELECT, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_PWL_DISABLE, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_MODE_CURRENT, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_SELECT_CURRENT, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_WRITE_COLOR_MASK, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_READ_COLOR_SEL, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_READ_DBG, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_HOST_SEL, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_CONFIG_MODE, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_STATUS, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_LUT_DATA, MPCC_OGAM_LUT_DATA, mask_sh),\
+ SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_MODE, mask_sh),\
+ SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_SIZE, mask_sh),\
+ SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_MODE_CURRENT, mask_sh),\
+ SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_WRITE_EN_MASK, mask_sh),\
+ SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_RAM_SEL, mask_sh),\
+ SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_30BIT_EN, mask_sh),\
+ SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_CONFIG_STATUS, mask_sh),\
+ SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_READ_SEL, mask_sh),\
+ SF(MPC_RMU0_3DLUT_INDEX, MPC_RMU_3DLUT_INDEX, mask_sh),\
+ SF(MPC_RMU0_3DLUT_DATA, MPC_RMU_3DLUT_DATA0, mask_sh),\
+ SF(MPC_RMU0_3DLUT_DATA, MPC_RMU_3DLUT_DATA1, mask_sh),\
+ SF(MPC_RMU0_3DLUT_DATA_30BIT, MPC_RMU_3DLUT_DATA_30BIT, mask_sh),\
+ SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_LUT_MODE, mask_sh),\
+ SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_LUT_MODE_CURRENT, mask_sh),\
+ SF(MPC_RMU0_SHAPER_OFFSET_R, MPC_RMU_SHAPER_OFFSET_R, mask_sh),\
+ SF(MPC_RMU0_SHAPER_OFFSET_G, MPC_RMU_SHAPER_OFFSET_G, mask_sh),\
+ SF(MPC_RMU0_SHAPER_OFFSET_B, MPC_RMU_SHAPER_OFFSET_B, mask_sh),\
+ SF(MPC_RMU0_SHAPER_SCALE_R, MPC_RMU_SHAPER_SCALE_R, mask_sh),\
+ SF(MPC_RMU0_SHAPER_SCALE_G_B, MPC_RMU_SHAPER_SCALE_G, mask_sh),\
+ SF(MPC_RMU0_SHAPER_SCALE_G_B, MPC_RMU_SHAPER_SCALE_B, mask_sh),\
+ SF(MPC_RMU0_SHAPER_LUT_INDEX, MPC_RMU_SHAPER_LUT_INDEX, mask_sh),\
+ SF(MPC_RMU0_SHAPER_LUT_DATA, MPC_RMU_SHAPER_LUT_DATA, mask_sh),\
+ SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_LUT_WRITE_EN_MASK, mask_sh),\
+ SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_LUT_WRITE_SEL, mask_sh),\
+ SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_CONFIG_STATUS, mask_sh),\
+ SF(MPC_RMU0_SHAPER_RAMA_START_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, mask_sh),\
+ SF(MPC_RMU0_SHAPER_RAMA_START_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\
+ SF(MPC_RMU0_SHAPER_RAMA_END_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, mask_sh),\
+ SF(MPC_RMU0_SHAPER_RAMA_END_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, mask_sh),\
+ SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\
+ SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\
+ SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\
+ SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
+ SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_PWR_FORCE, mask_sh),\
+ SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_PWR_DIS, mask_sh),\
+ SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_SHAPER_MEM_PWR_STATE, mask_sh),\
+ SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_3DLUT_MEM_PWR_STATE, mask_sh),\
+ SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_MEM_PWR_FORCE, mask_sh),\
+ SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_MEM_PWR_DIS, mask_sh),\
+ SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_SHAPER_MEM_PWR_STATE, mask_sh),\
+ SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_3DLUT_MEM_PWR_STATE, mask_sh),\
+ SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh)
+
+
+#define MPC_COMMON_MASK_SH_LIST_DCN30(mask_sh) \
+ MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh),\
+ SF(MPCC0_MPCC_CONTROL, MPCC_BG_BPC, mask_sh),\
+ SF(MPCC0_MPCC_CONTROL, MPCC_BOT_GAIN_MODE, mask_sh),\
+ SF(MPCC0_MPCC_TOP_GAIN, MPCC_TOP_GAIN, mask_sh),\
+ SF(MPCC0_MPCC_BOT_GAIN_INSIDE, MPCC_BOT_GAIN_INSIDE, mask_sh),\
+ SF(MPCC0_MPCC_BOT_GAIN_OUTSIDE, MPCC_BOT_GAIN_OUTSIDE, mask_sh),\
+ SF(MPC_OUT0_CSC_MODE, MPC_OCSC_MODE, mask_sh),\
+ SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C11_A, mask_sh),\
+ SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C12_A, mask_sh),\
+ SF(MPCC0_MPCC_STATUS, MPCC_DISABLED, mask_sh),\
+ SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_FORCE, mask_sh),\
+ SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_DIS, mask_sh),\
+ SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_MODE, mask_sh),\
+ SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MAX_R_CR, mask_sh),\
+ SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MIN_R_CR, mask_sh),\
+ SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\
+ SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\
+ SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\
+ SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE, MPCC_GAMUT_REMAP_MODE, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE, MPCC_GAMUT_REMAP_MODE_CURRENT, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_COEF_FORMAT, MPCC_GAMUT_REMAP_COEF_FORMAT, mask_sh),\
+ SF(MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A, MPCC_GAMUT_REMAP_C11_A, mask_sh),\
+ SF(MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A, MPCC_GAMUT_REMAP_C12_A, mask_sh),\
+ SF(MPC_DWB0_MUX, MPC_DWB0_MUX, mask_sh),\
+ SF(MPC_DWB0_MUX, MPC_DWB0_MUX_STATUS, mask_sh),\
+ SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL, mask_sh),\
+ SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL_DISABLE, mask_sh),\
+ SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_MODE, mask_sh),\
+ SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_COUNT, mask_sh), \
+ SF(MPC_RMU_CONTROL, MPC_RMU0_MUX, mask_sh), \
+ SF(MPC_RMU_CONTROL, MPC_RMU1_MUX, mask_sh), \
+ SF(MPC_RMU_CONTROL, MPC_RMU0_MUX_STATUS, mask_sh), \
+ SF(MPC_RMU_CONTROL, MPC_RMU1_MUX_STATUS, mask_sh), \
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B, MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_B, MPCC_OGAM_RAMA_OFFSET_B, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_G, MPCC_OGAM_RAMA_OFFSET_G, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_R, MPCC_OGAM_RAMA_OFFSET_R, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_LUT_INDEX, MPCC_OGAM_LUT_INDEX, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_MODE, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_SELECT, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_PWL_DISABLE, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_MODE_CURRENT, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_SELECT_CURRENT, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_WRITE_COLOR_MASK, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_READ_COLOR_SEL, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_READ_DBG, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_HOST_SEL, mask_sh),\
+ SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_CONFIG_MODE, mask_sh),\
+ /*SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_STATUS, mask_sh),*/\
+ SF(MPCC_OGAM0_MPCC_OGAM_LUT_DATA, MPCC_OGAM_LUT_DATA, mask_sh),\
+ SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_MODE, mask_sh),\
+ SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_SIZE, mask_sh),\
+ /*SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_MODE_CURRENT, mask_sh),*/\
+ SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_WRITE_EN_MASK, mask_sh),\
+ SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_RAM_SEL, mask_sh),\
+ SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_30BIT_EN, mask_sh),\
+ /*SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_CONFIG_STATUS, mask_sh),*/\
+ SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_READ_SEL, mask_sh),\
+ SF(MPC_RMU0_3DLUT_INDEX, MPC_RMU_3DLUT_INDEX, mask_sh),\
+ SF(MPC_RMU0_3DLUT_DATA, MPC_RMU_3DLUT_DATA0, mask_sh),\
+ SF(MPC_RMU0_3DLUT_DATA, MPC_RMU_3DLUT_DATA1, mask_sh),\
+ SF(MPC_RMU0_3DLUT_DATA_30BIT, MPC_RMU_3DLUT_DATA_30BIT, mask_sh),\
+ SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_LUT_MODE, mask_sh),\
+ /*SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_LUT_MODE_CURRENT, mask_sh),*/\
+ SF(MPC_RMU0_SHAPER_OFFSET_R, MPC_RMU_SHAPER_OFFSET_R, mask_sh),\
+ SF(MPC_RMU0_SHAPER_OFFSET_G, MPC_RMU_SHAPER_OFFSET_G, mask_sh),\
+ SF(MPC_RMU0_SHAPER_OFFSET_B, MPC_RMU_SHAPER_OFFSET_B, mask_sh),\
+ SF(MPC_RMU0_SHAPER_SCALE_R, MPC_RMU_SHAPER_SCALE_R, mask_sh),\
+ SF(MPC_RMU0_SHAPER_SCALE_G_B, MPC_RMU_SHAPER_SCALE_G, mask_sh),\
+ SF(MPC_RMU0_SHAPER_SCALE_G_B, MPC_RMU_SHAPER_SCALE_B, mask_sh),\
+ SF(MPC_RMU0_SHAPER_LUT_INDEX, MPC_RMU_SHAPER_LUT_INDEX, mask_sh),\
+ SF(MPC_RMU0_SHAPER_LUT_DATA, MPC_RMU_SHAPER_LUT_DATA, mask_sh),\
+ SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_LUT_WRITE_EN_MASK, mask_sh),\
+ SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_LUT_WRITE_SEL, mask_sh),\
+ /*SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_CONFIG_STATUS, mask_sh),*/\
+ SF(MPC_RMU0_SHAPER_RAMA_START_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, mask_sh),\
+ SF(MPC_RMU0_SHAPER_RAMA_START_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\
+ SF(MPC_RMU0_SHAPER_RAMA_END_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, mask_sh),\
+ SF(MPC_RMU0_SHAPER_RAMA_END_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, mask_sh),\
+ SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\
+ SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\
+ SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\
+ SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
+ SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_PWR_FORCE, mask_sh),\
+ SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_PWR_DIS, mask_sh),\
+ SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_SHAPER_MEM_PWR_STATE, mask_sh),\
+ SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_3DLUT_MEM_PWR_STATE, mask_sh),\
+ SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_MEM_PWR_FORCE, mask_sh),\
+ SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_MEM_PWR_DIS, mask_sh),\
+ SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_SHAPER_MEM_PWR_STATE, mask_sh),\
+ SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU1_3DLUT_MEM_PWR_STATE, mask_sh),\
+ SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_MODE_CURRENT, mask_sh),\
+ SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh)
+
+
+#define MPC_REG_FIELD_LIST_DCN3_0(type) \
+ MPC_REG_FIELD_LIST_DCN2_0(type) \
+ type MPC_DWB0_MUX;\
+ type MPC_DWB0_MUX_STATUS;\
+ type MPC_OUT_RATE_CONTROL;\
+ type MPC_OUT_RATE_CONTROL_DISABLE;\
+ type MPC_OUT_FLOW_CONTROL_MODE;\
+ type MPC_OUT_FLOW_CONTROL_COUNT; \
+ type MPCC_GAMUT_REMAP_MODE; \
+ type MPCC_GAMUT_REMAP_MODE_CURRENT;\
+ type MPCC_GAMUT_REMAP_COEF_FORMAT; \
+ type MPCC_GAMUT_REMAP_C11_A; \
+ type MPCC_GAMUT_REMAP_C12_A; \
+ type MPC_RMU0_MUX; \
+ type MPC_RMU1_MUX; \
+ type MPC_RMU0_MUX_STATUS; \
+ type MPC_RMU1_MUX_STATUS; \
+ type MPC_RMU0_MEM_PWR_FORCE;\
+ type MPC_RMU0_MEM_PWR_DIS;\
+ type MPC_RMU0_SHAPER_MEM_PWR_STATE;\
+ type MPC_RMU0_3DLUT_MEM_PWR_STATE;\
+ type MPC_RMU1_MEM_PWR_FORCE;\
+ type MPC_RMU1_MEM_PWR_DIS;\
+ type MPC_RMU1_SHAPER_MEM_PWR_STATE;\
+ type MPC_RMU1_3DLUT_MEM_PWR_STATE;\
+ type MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B; \
+ type MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B;\
+ type MPCC_OGAM_RAMA_OFFSET_B;\
+ type MPCC_OGAM_RAMA_OFFSET_G;\
+ type MPCC_OGAM_RAMA_OFFSET_R;\
+ type MPCC_OGAM_SELECT; \
+ type MPCC_OGAM_PWL_DISABLE; \
+ type MPCC_OGAM_MODE_CURRENT; \
+ type MPCC_OGAM_SELECT_CURRENT; \
+ type MPCC_OGAM_LUT_WRITE_COLOR_MASK; \
+ type MPCC_OGAM_LUT_READ_COLOR_SEL; \
+ type MPCC_OGAM_LUT_READ_DBG; \
+ type MPCC_OGAM_LUT_HOST_SEL; \
+ type MPCC_OGAM_LUT_CONFIG_MODE; \
+ type MPCC_OGAM_LUT_STATUS; \
+ type MPCC_OGAM_RAMA_START_BASE_CNTL_B;\
+ type MPC_RMU_3DLUT_MODE; \
+ type MPC_RMU_3DLUT_SIZE; \
+ type MPC_RMU_3DLUT_MODE_CURRENT; \
+ type MPC_RMU_3DLUT_WRITE_EN_MASK;\
+ type MPC_RMU_3DLUT_RAM_SEL;\
+ type MPC_RMU_3DLUT_30BIT_EN;\
+ type MPC_RMU_3DLUT_CONFIG_STATUS;\
+ type MPC_RMU_3DLUT_READ_SEL;\
+ type MPC_RMU_3DLUT_INDEX;\
+ type MPC_RMU_3DLUT_DATA0;\
+ type MPC_RMU_3DLUT_DATA1;\
+ type MPC_RMU_3DLUT_DATA_30BIT;\
+ type MPC_RMU_SHAPER_LUT_MODE;\
+ type MPC_RMU_SHAPER_LUT_MODE_CURRENT;\
+ type MPC_RMU_SHAPER_OFFSET_R;\
+ type MPC_RMU_SHAPER_OFFSET_G;\
+ type MPC_RMU_SHAPER_OFFSET_B;\
+ type MPC_RMU_SHAPER_SCALE_R;\
+ type MPC_RMU_SHAPER_SCALE_G;\
+ type MPC_RMU_SHAPER_SCALE_B;\
+ type MPC_RMU_SHAPER_LUT_INDEX;\
+ type MPC_RMU_SHAPER_LUT_DATA;\
+ type MPC_RMU_SHAPER_LUT_WRITE_EN_MASK;\
+ type MPC_RMU_SHAPER_LUT_WRITE_SEL;\
+ type MPC_RMU_SHAPER_CONFIG_STATUS;\
+ type MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B;\
+ type MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B;\
+ type MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B;\
+ type MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B;\
+ type MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET;\
+ type MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS;\
+ type MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET;\
+ type MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS;\
+ type MPC_RMU_SHAPER_MODE_CURRENT
+
+
+struct dcn30_mpc_registers {
+ MPC_REG_VARIABLE_LIST_DCN3_0;
+};
+
+struct dcn30_mpc_shift {
+ MPC_REG_FIELD_LIST_DCN3_0(uint8_t);
+};
+
+struct dcn30_mpc_mask {
+ MPC_REG_FIELD_LIST_DCN3_0(uint32_t);
+};
+
+struct dcn30_mpc {
+ struct mpc base;
+
+ int mpcc_in_use_mask;
+ int num_mpcc;
+ const struct dcn30_mpc_registers *mpc_regs;
+ const struct dcn30_mpc_shift *mpc_shift;
+ const struct dcn30_mpc_mask *mpc_mask;
+ int num_rmu;
+};
+
+void dcn30_mpc_construct(struct dcn30_mpc *mpc30,
+ struct dc_context *ctx,
+ const struct dcn30_mpc_registers *mpc_regs,
+ const struct dcn30_mpc_shift *mpc_shift,
+ const struct dcn30_mpc_mask *mpc_mask,
+ int num_mpcc,
+ int num_rmu);
+
+bool mpc3_program_shaper(
+ struct mpc *mpc,
+ const struct pwl_params *params,
+ uint32_t rmu_idx);
+
+bool mpc3_program_3dlut(
+ struct mpc *mpc,
+ const struct tetrahedral_params *params,
+ int rmu_idx);
+
+uint32_t mpcc3_acquire_rmu(struct mpc *mpc,
+ int mpcc_id, int rmu_idx);
+
+void mpc3_set_denorm(
+ struct mpc *mpc,
+ int opp_id,
+ enum dc_color_depth output_depth);
+
+void mpc3_set_denorm_clamp(
+ struct mpc *mpc,
+ int opp_id,
+ struct mpc_denorm_clamp denorm_clamp);
+
+void mpc3_set_output_csc(
+ struct mpc *mpc,
+ int opp_id,
+ const uint16_t *regval,
+ enum mpc_output_csc_mode ocsc_mode);
+
+void mpc3_set_ocsc_default(
+ struct mpc *mpc,
+ int opp_id,
+ enum dc_color_space color_space,
+ enum mpc_output_csc_mode ocsc_mode);
+
+void mpc3_set_output_gamma(
+ struct mpc *mpc,
+ int mpcc_id,
+ const struct pwl_params *params);
+
+uint32_t mpc3_get_rmu_mux_status(
+ struct mpc *mpc,
+ int rmu_idx);
+
+void mpc3_set_gamut_remap(
+ struct mpc *mpc,
+ int mpcc_id,
+ const struct mpc_grph_gamut_adjustment *adjust);
+
+void mpc3_set_rmu_mux(
+ struct mpc *mpc,
+ int rmu_idx,
+ int value);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_opp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_opp.h
new file mode 100644
index 000000000000..78f7cf772bbc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_opp.h
@@ -0,0 +1,36 @@
+/* Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_OPP_DCN30_H__
+#define __DC_OPP_DCN30_H__
+
+#include "dcn20/dcn20_opp.h"
+
+
+#define OPP_REG_LIST_DCN30(id) \
+ OPP_REG_LIST_DCN10(id), \
+ OPP_DPG_REG_LIST(id), \
+ SRI(FMT_422_CONTROL, FMT, id)
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
new file mode 100644
index 000000000000..224c8d145eba
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -0,0 +1,365 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dcn30_optc.h"
+#include "dc.h"
+#include "dcn_calc_math.h"
+
+#define REG(reg)\
+ optc1->tg_regs->reg
+
+#define CTX \
+ optc1->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ optc1->tg_shift->field_name, optc1->tg_mask->field_name
+
+void optc3_triplebuffer_lock(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_UPDATE(OTG_GLOBAL_CONTROL2,
+ OTG_MASTER_UPDATE_LOCK_SEL, optc->inst);
+
+ REG_SET(OTG_VUPDATE_KEEPOUT, 0,
+ OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1);
+
+ REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
+ OTG_MASTER_UPDATE_LOCK, 1);
+
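+	/* Poll UPDATE_LOCK_STATUS every 1 us, up to 10 tries; skipped on
+	 * Maximus since the status does not update there.
+	 */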
+ if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
+ REG_WAIT(OTG_MASTER_UPDATE_LOCK,
+ UPDATE_LOCK_STATUS, 1,
+ 1, 10);
+}
+
+void optc3_lock_doublebuffer_enable(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t v_blank_start = 0;
+ uint32_t v_blank_end = 0;
+ uint32_t h_blank_start = 0;
+ uint32_t h_blank_end = 0;
+
+ REG_GET_2(OTG_V_BLANK_START_END,
+ OTG_V_BLANK_START, &v_blank_start,
+ OTG_V_BLANK_END, &v_blank_end);
+ REG_GET_2(OTG_H_BLANK_START_END,
+ OTG_H_BLANK_START, &h_blank_start,
+ OTG_H_BLANK_END, &h_blank_end);
+
+ REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
+ MASTER_UPDATE_LOCK_DB_START_Y, v_blank_start,
+ MASTER_UPDATE_LOCK_DB_END_Y, v_blank_end);
+ REG_UPDATE_2(OTG_GLOBAL_CONTROL4,
+ DIG_UPDATE_POSITION_X, 20,
+ DIG_UPDATE_POSITION_Y, v_blank_start);
+ REG_UPDATE_3(OTG_GLOBAL_CONTROL0,
+ MASTER_UPDATE_LOCK_DB_START_X, h_blank_start - 200 - 1,
+ MASTER_UPDATE_LOCK_DB_END_X, h_blank_end,
+ MASTER_UPDATE_LOCK_DB_EN, 1);
+ REG_UPDATE(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 1);
+}
+
+void optc3_lock_doublebuffer_disable(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_UPDATE_2(OTG_GLOBAL_CONTROL0,
+ MASTER_UPDATE_LOCK_DB_START_X, 0,
+ MASTER_UPDATE_LOCK_DB_END_X, 0);
+ REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
+ MASTER_UPDATE_LOCK_DB_START_Y, 0,
+ MASTER_UPDATE_LOCK_DB_END_Y, 0);
+
+ REG_UPDATE(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 0);
+ REG_UPDATE(OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_EN, 1);
+}
+
+void optc3_lock(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_UPDATE(OTG_GLOBAL_CONTROL2,
+ OTG_MASTER_UPDATE_LOCK_SEL, optc->inst);
+ REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
+ OTG_MASTER_UPDATE_LOCK, 1);
+
+ /* Should be fast, status does not update on maximus */
+ if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
+ REG_WAIT(OTG_MASTER_UPDATE_LOCK,
+ UPDATE_LOCK_STATUS, 1,
+ 1, 10);
+}
+
+void optc3_set_out_mux(struct timing_generator *optc, enum otg_out_mux_dest dest)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_UPDATE(OTG_CONTROL, OTG_OUT_MUX, dest);
+}
+
+void optc3_program_blank_color(struct timing_generator *optc,
+ const struct tg_color *blank_color)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET_3(OTG_BLANK_DATA_COLOR, 0,
+ OTG_BLANK_DATA_COLOR_BLUE_CB, blank_color->color_b_cb,
+ OTG_BLANK_DATA_COLOR_GREEN_Y, blank_color->color_g_y,
+ OTG_BLANK_DATA_COLOR_RED_CR, blank_color->color_r_cr);
+
+ REG_SET_3(OTG_BLANK_DATA_COLOR_EXT, 0,
+ OTG_BLANK_DATA_COLOR_BLUE_CB_EXT, blank_color->color_b_cb >> 10,
+ OTG_BLANK_DATA_COLOR_GREEN_Y_EXT, blank_color->color_g_y >> 10,
+ OTG_BLANK_DATA_COLOR_RED_CR_EXT, blank_color->color_r_cr >> 10);
+}
+
+void optc3_set_drr_trigger_window(struct timing_generator *optc,
+ uint32_t window_start, uint32_t window_end)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET_2(OTG_DRR_TRIGGER_WINDOW, 0,
+ OTG_DRR_TRIGGER_WINDOW_START_X, window_start,
+ OTG_DRR_TRIGGER_WINDOW_END_X, window_end);
+}
+
+void optc3_set_vtotal_change_limit(struct timing_generator *optc,
+ uint32_t limit)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+
+ REG_SET(OTG_DRR_V_TOTAL_CHANGE, 0,
+ OTG_DRR_V_TOTAL_CHANGE_LIMIT, limit);
+}
+
+
+/* Set DSC-related configuration.
+ * dsc_mode: 0 disables DSC, other values enable DSC in specified format
+ * dsc_bytes_per_pixel: DSC bytes per pixel in u3.28 fixed-point format
+ * dsc_slice_width: Slice width in pixels
+ */
+void optc3_set_dsc_config(struct timing_generator *optc,
+ enum optc_dsc_mode dsc_mode,
+ uint32_t dsc_bytes_per_pixel,
+ uint32_t dsc_slice_width)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ optc2_set_dsc_config(optc, dsc_mode, dsc_bytes_per_pixel,
+ dsc_slice_width);
+
+ REG_UPDATE(OTG_V_SYNC_A_CNTL, OTG_V_SYNC_MODE, 0);
+
+}
+
+
+static void optc3_set_odm_bypass(struct timing_generator *optc,
+ const struct dc_crtc_timing *dc_crtc_timing)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ enum h_timing_div_mode h_div = H_TIMING_NO_DIV;
+
+ REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0,
+ OPTC_NUM_OF_INPUT_SEGMENT, 0,
+ OPTC_SEG0_SRC_SEL, optc->inst,
+ OPTC_SEG1_SRC_SEL, 0xf,
+ OPTC_SEG2_SRC_SEL, 0xf,
+ OPTC_SEG3_SRC_SEL, 0xf
+ );
+
+ h_div = optc1_is_two_pixels_per_containter(dc_crtc_timing);
+ REG_SET(OTG_H_TIMING_CNTL, 0,
+ OTG_H_TIMING_DIV_MODE, h_div);
+
+ REG_SET(OPTC_MEMORY_CONFIG, 0,
+ OPTC_MEM_SEL, 0);
+ optc1->opp_count = 1;
+}
+
+static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
+ struct dc_crtc_timing *timing)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right)
+ / opp_cnt;
+ uint32_t memory_mask = 0;
+ uint32_t data_fmt = 0;
+
+	/* TODO: In pseudocode but does not affect Maximus; delete this comment if not needed on ASIC
+ * REG_SET(OTG_GLOBAL_CONTROL2, 0, GLOBAL_UPDATE_LOCK_EN, 1);
+ * Program OTG register MASTER_UPDATE_LOCK_DB_X/Y to the position before DP frame start
+ * REG_SET_2(OTG_GLOBAL_CONTROL1, 0,
+ * MASTER_UPDATE_LOCK_DB_X, 160,
+ * MASTER_UPDATE_LOCK_DB_Y, 240);
+ */
+
+ ASSERT(opp_cnt == 2 || opp_cnt == 4);
+
+	/* 2 pieces of memory required for displays up to 5120 pixels wide, 4 for up to 8192,
+ * however, for ODM combine we can simplify by always using 4.
+ */
+ if (opp_cnt == 2) {
+ /* To make sure there's no memory overlap, each instance "reserves" 2
+ * memories and they are uniquely combined here.
+ */
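+		/* e.g. OPP ids 0 and 1 give memory_mask = 0x3 | 0xC = 0xF */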
+ memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2);
+ } else if (opp_cnt == 4) {
+ /* To make sure there's no memory overlap, each instance "reserves" 1
+ * memory and they are uniquely combined here.
+ */
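+		/* e.g. OPP ids 0..3 give memory_mask = 0x1 | 0x4 | 0x10 | 0x40 = 0x55 */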
+ memory_mask = 0x1 << (opp_id[0] * 2) | 0x1 << (opp_id[1] * 2) | 0x1 << (opp_id[2] * 2) | 0x1 << (opp_id[3] * 2);
+ }
+
+ if (REG(OPTC_MEMORY_CONFIG))
+ REG_SET(OPTC_MEMORY_CONFIG, 0,
+ OPTC_MEM_SEL, memory_mask);
+
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ data_fmt = 1;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ data_fmt = 2;
+
+ REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt);
+
+ if (opp_cnt == 2) {
+ REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
+ OPTC_NUM_OF_INPUT_SEGMENT, 1,
+ OPTC_SEG0_SRC_SEL, opp_id[0],
+ OPTC_SEG1_SRC_SEL, opp_id[1]);
+ } else if (opp_cnt == 4) {
+ REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0,
+ OPTC_NUM_OF_INPUT_SEGMENT, 3,
+ OPTC_SEG0_SRC_SEL, opp_id[0],
+ OPTC_SEG1_SRC_SEL, opp_id[1],
+ OPTC_SEG2_SRC_SEL, opp_id[2],
+ OPTC_SEG3_SRC_SEL, opp_id[3]);
+ }
+
+ REG_UPDATE(OPTC_WIDTH_CONTROL,
+ OPTC_SEGMENT_WIDTH, mpcc_hactive);
+
+ REG_SET(OTG_H_TIMING_CNTL, 0, OTG_H_TIMING_DIV_MODE, opp_cnt - 1);
+ optc1->opp_count = opp_cnt;
+}
+
+/**
+ * optc3_set_timing_double_buffer() - DRR double buffering control
+ *
+ * Sets double buffer point for V_TOTAL, H_TOTAL, VTOTAL_MIN,
+ * VTOTAL_MAX, VTOTAL_MIN_SEL and VTOTAL_MAX_SEL registers.
+ *
+ * Options: any time, start of frame, DP start of frame (range timing)
+ */
+void optc3_set_timing_double_buffer(struct timing_generator *optc, bool enable)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t mode = enable ? 2 : 0;
+
+ REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL,
+ OTG_DRR_TIMING_DBUF_UPDATE_MODE, mode);
+}
+
+void optc3_tg_init(struct timing_generator *optc)
+{
+ optc3_set_timing_double_buffer(optc, true);
+ optc1_clear_optc_underflow(optc);
+}
+
+static struct timing_generator_funcs dcn30_tg_funcs = {
+ .validate_timing = optc1_validate_timing,
+ .program_timing = optc1_program_timing,
+ .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+ .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
+ .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
+ .program_global_sync = optc1_program_global_sync,
+ .enable_crtc = optc2_enable_crtc,
+ .disable_crtc = optc1_disable_crtc,
+	/* used by enable_timing_synchronization. Not needed for FPGA */
+ .is_counter_moving = optc1_is_counter_moving,
+ .get_position = optc1_get_position,
+ .get_frame_count = optc1_get_vblank_counter,
+ .get_scanoutpos = optc1_get_crtc_scanoutpos,
+ .get_otg_active_size = optc1_get_otg_active_size,
+ .set_early_control = optc1_set_early_control,
+	/* used by enable_timing_synchronization. Not needed for FPGA */
+ .wait_for_state = optc1_wait_for_state,
+ .set_blank_color = optc3_program_blank_color,
+ .did_triggered_reset_occur = optc1_did_triggered_reset_occur,
+ .triplebuffer_lock = optc3_triplebuffer_lock,
+ .triplebuffer_unlock = optc2_triplebuffer_unlock,
+ .enable_reset_trigger = optc1_enable_reset_trigger,
+ .enable_crtc_reset = optc1_enable_crtc_reset,
+ .disable_reset_trigger = optc1_disable_reset_trigger,
+ .lock = optc3_lock,
+ .unlock = optc1_unlock,
+ .lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
+ .lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
+ .enable_optc_clock = optc1_enable_optc_clock,
+ .set_drr = optc1_set_drr,
+ .set_static_screen_control = optc1_set_static_screen_control,
+ .program_stereo = optc1_program_stereo,
+ .is_stereo_left_eye = optc1_is_stereo_left_eye,
+ .tg_init = optc3_tg_init,
+ .is_tg_enabled = optc1_is_tg_enabled,
+ .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
+ .clear_optc_underflow = optc1_clear_optc_underflow,
+ .setup_global_swap_lock = NULL,
+ .get_crc = optc1_get_crc,
+ .configure_crc = optc2_configure_crc,
+ .set_dsc_config = optc3_set_dsc_config,
+ .set_dwb_source = NULL,
+ .set_odm_bypass = optc3_set_odm_bypass,
+ .set_odm_combine = optc3_set_odm_combine,
+ .get_optc_source = optc2_get_optc_source,
+ .set_out_mux = optc3_set_out_mux,
+ .set_drr_trigger_window = optc3_set_drr_trigger_window,
+ .set_vtotal_change_limit = optc3_set_vtotal_change_limit,
+ .set_gsl = optc2_set_gsl,
+ .set_gsl_source_select = optc2_set_gsl_source_select,
+ .set_vtg_params = optc1_set_vtg_params,
+ .program_manual_trigger = optc2_program_manual_trigger,
+ .setup_manual_trigger = optc2_setup_manual_trigger,
+ .get_hw_timing = optc1_get_hw_timing,
+};
+
+void dcn30_timing_generator_init(struct optc *optc1)
+{
+ optc1->base.funcs = &dcn30_tg_funcs;
+
+ optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
+ optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
+
+ optc1->min_h_blank = 32;
+ optc1->min_v_blank = 3;
+ optc1->min_v_blank_interlace = 5;
+ optc1->min_h_sync_width = 8;
+ optc1->min_v_sync_width = 1;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
new file mode 100644
index 000000000000..d4106dd5a9b0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
@@ -0,0 +1,333 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_OPTC_DCN30_H__
+#define __DC_OPTC_DCN30_H__
+
+#include "dcn20/dcn20_optc.h"
+
+
+#define OPTC_COMMON_REG_LIST_DCN3_BASE(inst) \
+ SRI(OTG_VSTARTUP_PARAM, OTG, inst),\
+ SRI(OTG_VUPDATE_PARAM, OTG, inst),\
+ SRI(OTG_VREADY_PARAM, OTG, inst),\
+ SRI(OTG_MASTER_UPDATE_LOCK, OTG, inst),\
+ SRI(OTG_GLOBAL_CONTROL0, OTG, inst),\
+ SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\
+ SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\
+ SRI(OTG_GLOBAL_CONTROL4, OTG, inst),\
+ SRI(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst),\
+ SRI(OTG_H_TOTAL, OTG, inst),\
+ SRI(OTG_H_BLANK_START_END, OTG, inst),\
+ SRI(OTG_H_SYNC_A, OTG, inst),\
+ SRI(OTG_H_SYNC_A_CNTL, OTG, inst),\
+ SRI(OTG_H_TIMING_CNTL, OTG, inst),\
+ SRI(OTG_V_TOTAL, OTG, inst),\
+ SRI(OTG_V_BLANK_START_END, OTG, inst),\
+ SRI(OTG_V_SYNC_A, OTG, inst),\
+ SRI(OTG_V_SYNC_A_CNTL, OTG, inst),\
+ SRI(OTG_CONTROL, OTG, inst),\
+ SRI(OTG_STEREO_CONTROL, OTG, inst),\
+ SRI(OTG_3D_STRUCTURE_CONTROL, OTG, inst),\
+ SRI(OTG_STEREO_STATUS, OTG, inst),\
+ SRI(OTG_V_TOTAL_MAX, OTG, inst),\
+ SRI(OTG_V_TOTAL_MIN, OTG, inst),\
+ SRI(OTG_V_TOTAL_CONTROL, OTG, inst),\
+ SRI(OTG_TRIGA_CNTL, OTG, inst),\
+ SRI(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\
+ SRI(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\
+ SRI(OTG_STATUS_FRAME_COUNT, OTG, inst),\
+ SRI(OTG_STATUS, OTG, inst),\
+ SRI(OTG_STATUS_POSITION, OTG, inst),\
+ SRI(OTG_NOM_VERT_POSITION, OTG, inst),\
+ SRI(OTG_BLANK_DATA_COLOR, OTG, inst),\
+ SRI(OTG_BLANK_DATA_COLOR_EXT, OTG, inst),\
+ SRI(OTG_M_CONST_DTO0, OTG, inst),\
+ SRI(OTG_M_CONST_DTO1, OTG, inst),\
+ SRI(OTG_CLOCK_CONTROL, OTG, inst),\
+ SRI(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst),\
+ SRI(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst),\
+ SRI(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst),\
+ SRI(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst),\
+ SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\
+ SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\
+ SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\
+ SRI(OPTC_DATA_SOURCE_SELECT, ODM, inst),\
+ SRI(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst),\
+ SRI(CONTROL, VTG, inst),\
+ SRI(OTG_VERT_SYNC_CONTROL, OTG, inst),\
+ SRI(OTG_GSL_CONTROL, OTG, inst),\
+ SRI(OTG_CRC_CNTL, OTG, inst),\
+ SRI(OTG_CRC0_DATA_RG, OTG, inst),\
+ SRI(OTG_CRC0_DATA_B, OTG, inst),\
+ SRI(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst),\
+ SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\
+ SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\
+ SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst),\
+ SR(GSL_SOURCE_SELECT),\
+ SRI(OTG_TRIGA_MANUAL_TRIG, OTG, inst)
+
+
+#define OPTC_COMMON_REG_LIST_DCN3_0(inst) \
+ OPTC_COMMON_REG_LIST_DCN3_BASE(inst),\
+ SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\
+ SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\
+ SRI(OTG_GSL_WINDOW_X, OTG, inst),\
+ SRI(OTG_GSL_WINDOW_Y, OTG, inst),\
+ SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\
+ SRI(OTG_DSC_START_POSITION, OTG, inst),\
+ SRI(OTG_DRR_TRIGGER_WINDOW, OTG, inst),\
+ SRI(OTG_DRR_V_TOTAL_CHANGE, OTG, inst),\
+ SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\
+ SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\
+ SRI(OPTC_WIDTH_CONTROL, ODM, inst),\
+ SRI(OPTC_MEMORY_CONFIG, ODM, inst),\
+ SR(DWB_SOURCE_SELECT)
+
+
+#define OPTC_COMMON_MASK_SH_LIST_DCN3_BASE(mask_sh)\
+ SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\
+ SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_OFFSET, mask_sh),\
+ SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_WIDTH, mask_sh),\
+ SF(OTG0_OTG_VREADY_PARAM, VREADY_OFFSET, mask_sh),\
+ SF(OTG0_OTG_MASTER_UPDATE_LOCK, OTG_MASTER_UPDATE_LOCK, mask_sh),\
+ SF(OTG0_OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_START_X, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_END_X, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_EN, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_START_Y, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_END_Y, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL2, OTG_MASTER_UPDATE_LOCK_SEL, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_X, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_Y, mask_sh),\
+ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\
+ SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\
+ SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\
+ SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_START, mask_sh),\
+ SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_END, mask_sh),\
+ SF(OTG0_OTG_H_SYNC_A_CNTL, OTG_H_SYNC_A_POL, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL, OTG_V_TOTAL, mask_sh),\
+ SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_START, mask_sh),\
+ SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_END, mask_sh),\
+ SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_START, mask_sh),\
+ SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_END, mask_sh),\
+ SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_A_POL, mask_sh),\
+ SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_MODE, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_MASTER_EN, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_START_POINT_CNTL, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_DISABLE_POINT_CNTL, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_FIELD_NUMBER_CNTL, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_OUT_MUX, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EN, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_POLARITY, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EYE_FLAG_POLARITY, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, mask_sh),\
+ SF(OTG0_OTG_STEREO_STATUS, OTG_STEREO_CURRENT_EYE, mask_sh),\
+ SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_EN, mask_sh),\
+ SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_V_UPDATE_MODE, mask_sh),\
+ SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_STEREO_SEL_OVR, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_MAX, OTG_V_TOTAL_MAX, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_MIN, OTG_V_TOTAL_MIN, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MIN_SEL, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MAX_SEL, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_FORCE_LOCK_ON_EVENT, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK_EN, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MIN_EN, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MAX_EN, mask_sh),\
+ SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_CLEAR, mask_sh),\
+ SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_MODE, mask_sh),\
+ SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_OCCURRED, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_SELECT, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_PIPE_SELECT, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_RISING_EDGE_DETECT_CNTL, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_POLARITY_SELECT, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FREQUENCY_SELECT, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_DELAY, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_CLEAR, mask_sh),\
+ SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_EVENT_MASK, mask_sh),\
+ SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_FRAME_COUNT, mask_sh),\
+ SF(OTG0_OTG_STATUS_FRAME_COUNT, OTG_FRAME_COUNT, mask_sh),\
+ SF(OTG0_OTG_STATUS, OTG_V_BLANK, mask_sh),\
+ SF(OTG0_OTG_STATUS, OTG_V_ACTIVE_DISP, mask_sh),\
+ SF(OTG0_OTG_STATUS_POSITION, OTG_HORZ_COUNT, mask_sh),\
+ SF(OTG0_OTG_STATUS_POSITION, OTG_VERT_COUNT, mask_sh),\
+ SF(OTG0_OTG_NOM_VERT_POSITION, OTG_VERT_COUNT_NOM, mask_sh),\
+ SF(OTG0_OTG_BLANK_DATA_COLOR, OTG_BLANK_DATA_COLOR_BLUE_CB, mask_sh),\
+ SF(OTG0_OTG_BLANK_DATA_COLOR, OTG_BLANK_DATA_COLOR_GREEN_Y, mask_sh),\
+ SF(OTG0_OTG_BLANK_DATA_COLOR, OTG_BLANK_DATA_COLOR_RED_CR, mask_sh),\
+ SF(OTG0_OTG_BLANK_DATA_COLOR_EXT, OTG_BLANK_DATA_COLOR_BLUE_CB_EXT, mask_sh),\
+ SF(OTG0_OTG_BLANK_DATA_COLOR_EXT, OTG_BLANK_DATA_COLOR_GREEN_Y_EXT, mask_sh),\
+ SF(OTG0_OTG_BLANK_DATA_COLOR_EXT, OTG_BLANK_DATA_COLOR_RED_CR_EXT, mask_sh),\
+ SF(OTG0_OTG_M_CONST_DTO0, OTG_M_CONST_DTO_PHASE, mask_sh),\
+ SF(OTG0_OTG_M_CONST_DTO1, OTG_M_CONST_DTO_MODULO, mask_sh),\
+ SF(OTG0_OTG_CLOCK_CONTROL, OTG_BUSY, mask_sh),\
+ SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_EN, mask_sh),\
+ SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_ON, mask_sh),\
+ SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_GATE_DIS, mask_sh),\
+ SF(OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE, mask_sh),\
+ SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_START, mask_sh),\
+ SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_END, mask_sh),\
+ SF(OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE, mask_sh),\
+ SF(OTG0_OTG_VERTICAL_INTERRUPT1_POSITION, OTG_VERTICAL_INTERRUPT1_LINE_START, mask_sh),\
+ SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\
+ SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\
+ SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\
+ SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_ON, mask_sh),\
+ SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\
+ SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\
+ SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\
+ SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\
+ SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\
+ SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\
+ SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED, mask_sh),\
+ SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_CLEAR, mask_sh),\
+ SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_AUTO_FORCE_VSYNC_MODE, mask_sh),\
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL0_EN, mask_sh),\
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL1_EN, mask_sh),\
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL2_EN, mask_sh),\
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_EN, mask_sh),\
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_FORCE_DELAY, mask_sh),\
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL, OTG_CRC_CONT_EN, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL, OTG_CRC0_SELECT, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL, OTG_CRC_EN, mask_sh),\
+ SF(OTG0_OTG_CRC0_DATA_RG, CRC0_R_CR, mask_sh),\
+ SF(OTG0_OTG_CRC0_DATA_RG, CRC0_G_Y, mask_sh),\
+ SF(OTG0_OTG_CRC0_DATA_B, CRC0_B_CB, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_START, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_END, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_START, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_END, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_START, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_END, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\
+ SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh),\
+ SF(OTG0_OTG_TRIGA_MANUAL_TRIG, OTG_TRIGA_MANUAL_TRIG, mask_sh),\
+ SF(GSL_SOURCE_SELECT, GSL0_READY_SOURCE_SEL, mask_sh),\
+ SF(GSL_SOURCE_SELECT, GSL1_READY_SOURCE_SEL, mask_sh),\
+ SF(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL2, MANUAL_FLOW_CONTROL_SEL, mask_sh)
+
+#define OPTC_COMMON_MASK_SH_LIST_DCN3_0(mask_sh)\
+ OPTC_COMMON_MASK_SH_LIST_DCN3_BASE(mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\
+ SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\
+ SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \
+ SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\
+ SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\
+ SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \
+ SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \
+ SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \
+ SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \
+ SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \
+ SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\
+ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\
+ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\
+ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\
+ SF(ODM0_OPTC_MEMORY_CONFIG, OPTC_MEM_SEL, mask_sh),\
+ SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, mask_sh),\
+ SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DSC_MODE, mask_sh),\
+ SF(ODM0_OPTC_BYTES_PER_PIXEL, OPTC_DSC_BYTES_PER_PIXEL, mask_sh),\
+ SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, mask_sh),\
+ SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_SEGMENT_WIDTH, mask_sh),\
+ SF(DWB_SOURCE_SELECT, OPTC_DWB0_SOURCE_SELECT, mask_sh),\
+ SF(DWB_SOURCE_SELECT, OPTC_DWB1_SOURCE_SELECT, mask_sh),\
+ SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_START_X, mask_sh),\
+ SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\
+ SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\
+ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_BY2, mask_sh),\
+ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\
+ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_BLANK_DATA_DOUBLE_BUFFER_EN, mask_sh)
+
+#define OPTC_COMMON_MASK_SH_LIST_DCN30(mask_sh)\
+ OPTC_COMMON_MASK_SH_LIST_DCN3_BASE(mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\
+ SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\
+ SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \
+ SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\
+ SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\
+ SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \
+ SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \
+ SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \
+ SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \
+ SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \
+ SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\
+ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\
+ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\
+ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG2_SRC_SEL, mask_sh),\
+ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG3_SRC_SEL, mask_sh),\
+ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\
+ SF(ODM0_OPTC_MEMORY_CONFIG, OPTC_MEM_SEL, mask_sh),\
+ SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, mask_sh),\
+ SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DSC_MODE, mask_sh),\
+ SF(ODM0_OPTC_BYTES_PER_PIXEL, OPTC_DSC_BYTES_PER_PIXEL, mask_sh),\
+ SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, mask_sh),\
+ SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_SEGMENT_WIDTH, mask_sh),\
+ SF(DWB_SOURCE_SELECT, OPTC_DWB0_SOURCE_SELECT, mask_sh),\
+ SF(DWB_SOURCE_SELECT, OPTC_DWB1_SOURCE_SELECT, mask_sh),\
+ SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_START_X, mask_sh),\
+ SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\
+ SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\
+ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\
+ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh)
+
+void dcn30_timing_generator_init(struct optc *optc1);
+
+void optc3_set_out_mux(struct timing_generator *optc, enum otg_out_mux_dest dest);
+
+void optc3_lock(struct timing_generator *optc);
+
+void optc3_lock_doublebuffer_enable(struct timing_generator *optc);
+
+void optc3_lock_doublebuffer_disable(struct timing_generator *optc);
+
+void optc3_set_vrr_m_const(struct timing_generator *optc,
+ double vtotal_avg);
+
+void optc3_set_drr_trigger_window(struct timing_generator *optc,
+ uint32_t window_start, uint32_t window_end);
+
+void optc3_triplebuffer_lock(struct timing_generator *optc);
+
+void optc3_program_blank_color(struct timing_generator *optc,
+ const struct tg_color *blank_color);
+
+void optc3_set_vtotal_change_limit(struct timing_generator *optc,
+ uint32_t limit);
+
+void optc3_set_dsc_config(struct timing_generator *optc,
+ enum optc_dsc_mode dsc_mode,
+ uint32_t dsc_bytes_per_pixel,
+ uint32_t dsc_slice_width);
+
+void optc3_set_timing_double_buffer(struct timing_generator *optc, bool enable);
+
+#endif /* __DC_OPTC_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
new file mode 100644
index 000000000000..d7ba895de765
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -0,0 +1,2691 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dm_services.h"
+#include "dc.h"
+
+#include "dcn30_init.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "dcn20/dcn20_resource.h"
+
+#include "dcn30_resource.h"
+
+#include "dcn10/dcn10_ipp.h"
+#include "dcn30/dcn30_hubbub.h"
+#include "dcn30/dcn30_mpc.h"
+#include "dcn30/dcn30_hubp.h"
+#include "irq/dcn30/irq_service_dcn30.h"
+#include "dcn30/dcn30_dpp.h"
+#include "dcn30/dcn30_optc.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "dcn30/dcn30_hwseq.h"
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn30/dcn30_opp.h"
+#include "dcn20/dcn20_dsc.h"
+#include "dcn30/dcn30_vpg.h"
+#include "dcn30/dcn30_afmt.h"
+#include "dcn30/dcn30_dio_stream_encoder.h"
+#include "dcn30/dcn30_dio_link_encoder.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_hwseq.h"
+#include "clk_mgr.h"
+#include "virtual/virtual_stream_encoder.h"
+#include "dce110/dce110_resource.h"
+#include "dml/display_mode_vba.h"
+#include "dcn30/dcn30_dccg.h"
+#include "dcn10/dcn10_resource.h"
+#include "dce/dce_panel_cntl.h"
+
+#include "dcn30/dcn30_dwb.h"
+#include "dcn30/dcn30_mmhubbub.h"
+
+#include "sienna_cichlid_ip_offset.h"
+#include "dcn/dcn_3_0_0_offset.h"
+#include "dcn/dcn_3_0_0_sh_mask.h"
+
+#include "nbio/nbio_7_4_offset.h"
+
+#include "dcn/dpcs_3_0_0_offset.h"
+#include "dcn/dpcs_3_0_0_sh_mask.h"
+
+#include "mmhub/mmhub_2_0_0_offset.h"
+#include "mmhub/mmhub_2_0_0_sh_mask.h"
+
+#include "reg_helper.h"
+#include "dce/dmub_abm.h"
+#include "dce/dce_aux.h"
+#include "dce/dce_i2c.h"
+
+#include "dml/dcn30/display_mode_vba_30.h"
+#include "vm_helper.h"
+#include "dcn20/dcn20_vmid.h"
+#include "amdgpu_socbb.h"
+
+#define DC_LOGGER_INIT(logger)
+
+struct _vcs_dpi_ip_params_st dcn3_0_ip = {
+ .use_min_dcfclk = 1,
+ .clamp_min_dcfclk = 0,
+ .odm_capable = 1,
+ .gpuvm_enable = 0,
+ .hostvm_enable = 0,
+ .gpuvm_max_page_table_levels = 4,
+ .hostvm_max_page_table_levels = 4,
+ .hostvm_cached_page_table_levels = 0,
+ .pte_group_size_bytes = 2048,
+ .num_dsc = 6,
+ .rob_buffer_size_kbytes = 184,
+ .det_buffer_size_kbytes = 184,
+ .dpte_buffer_size_in_pte_reqs_luma = 84,
+ .pde_proc_buffer_size_64k_reqs = 48,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .pte_enable = 1,
+ .max_page_table_levels = 2,
+ .pte_chunk_size_kbytes = 2, // ?
+ .meta_chunk_size_kbytes = 2,
+ .writeback_chunk_size_kbytes = 8,
+ .line_buffer_size_bits = 789504,
+ .is_line_buffer_bpp_fixed = 0, // ?
+ .line_buffer_fixed_bpp = 0, // ?
+ .dcc_supported = true,
+ .writeback_interface_buffer_size_kbytes = 90,
+ .writeback_line_buffer_buffer_size = 0,
+ .max_line_buffer_lines = 12,
+ .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
+ .writeback_chroma_buffer_size_kbytes = 8,
+ .writeback_chroma_line_buffer_width_pixels = 4,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .writeback_line_buffer_luma_buffer_size = 0,
+ .writeback_line_buffer_chroma_buffer_size = 14643,
+ .cursor_buffer_size = 8,
+ .cursor_chunk_size = 2,
+ .max_num_otg = 6,
+ .max_num_dpp = 6,
+ .max_num_wb = 1,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .hscl_mults = 4,
+ .vscl_mults = 4,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dispclk_ramp_margin_percent = 1,
+ .underscan_factor = 1.11,
+ .min_vblank_lines = 32,
+ .dppclk_delay_subtotal = 46,
+ .dynamic_metadata_vm_enabled = true,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_cnvc_formatter = 27,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 119,
+ .dcfclk_cstate_latency = 5.2, // SRExitTime
+ .max_inter_dcn_tile_repeaters = 8,
+ .odm_combine_4to1_supported = true,
+
+ .xfc_supported = false,
+ .xfc_fill_bw_overhead_percent = 10.0,
+ .xfc_fill_constant_bytes = 0,
+ .gfx7_compat_tiling_supported = 0,
+ .number_of_cursors = 1,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = {
+ .clock_limits = {
+ {
+ .state = 0,
+ .dispclk_mhz = 562.0,
+ .dppclk_mhz = 300.0,
+ .phyclk_mhz = 300.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 405.6,
+ },
+ },
+ .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
+ .num_states = 1,
+ .sr_exit_time_us = 12,
+ .sr_enter_plus_exit_time_us = 20,
+ .urgent_latency_us = 4.0,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 40.0,
+ .writeback_latency_us = 12.0,
+ .max_request_size_bytes = 256,
+ .fabric_datapath_to_dcn_data_return_bytes = 64,
+ .dcn_downspread_percent = 0.5,
+ .downspread_percent = 0.38,
+ .dram_page_open_time_ns = 50.0,
+ .dram_rw_turnaround_time_ns = 17.5,
+ .dram_return_buffer_per_channel_bytes = 8192,
+ .round_trip_ping_latency_dcfclk_cycles = 191,
+ .urgent_out_of_order_return_per_channel_bytes = 4096,
+ .channel_interleave_bytes = 256,
+ .num_banks = 8,
+ .gpuvm_min_page_size_bytes = 4096,
+ .hostvm_min_page_size_bytes = 4096,
+ .dram_clock_change_latency_us = 404,
+ .dummy_pstate_latency_us = 5,
+ .writeback_dram_clock_change_latency_us = 23.0,
+ .return_bus_width_bytes = 64,
+ .dispclk_dppclk_vco_speed_mhz = 3650,
+ .xfc_bus_transport_time_us = 20, // ?
+ .xfc_xbuf_latency_tolerance_us = 4, // ?
+ .use_urgent_burst_bw = 1, // ?
+ .do_urgent_latency_adjustment = true,
+ .urgent_latency_adjustment_fabric_clock_component_us = 1.0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
+};
+
+enum dcn30_clk_src_array_id {
+ DCN30_CLK_SRC_PLL0,
+ DCN30_CLK_SRC_PLL1,
+ DCN30_CLK_SRC_PLL2,
+ DCN30_CLK_SRC_PLL3,
+ DCN30_CLK_SRC_PLL4,
+ DCN30_CLK_SRC_PLL5,
+ DCN30_CLK_SRC_TOTAL
+};
+
+/* begin *********************
+ * macros to expand the register list macros defined in the HW object header files
+ */
+
+/* DCN */
+/* TODO awful hack. fixup dcn20_dwb.h */
+#undef BASE_INNER
+#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
+
+#define BASE(seg) BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+#define SRI(reg_name, block, id)\
+ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
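+/* e.g. SRI(OTG_CONTROL, OTG, 0) resolves to
+ *	.OTG_CONTROL = BASE(mmOTG0_OTG_CONTROL_BASE_IDX) + mmOTG0_OTG_CONTROL
+ */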
+
+#define SRI2(reg_name, block, id)\
+ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+#define SRIR(var_name, reg_name, block, id)\
+ .var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#define SRII_MPC_RMU(reg_name, block, id)\
+ .RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#define SRII_DWB(reg_name, temp_name, block, id)\
+ .reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## temp_name
+
+#define DCCG_SRII(reg_name, block, id)\
+ .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#define VUPDATE_SRII(reg_name, block, id)\
+ .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+ mm ## reg_name ## _ ## block ## id
+
+/* NBIO */
+#define NBIO_BASE_INNER(seg) \
+ NBIO_BASE__INST0_SEG ## seg
+
+#define NBIO_BASE(seg) \
+ NBIO_BASE_INNER(seg)
+
+#define NBIO_SR(reg_name)\
+ .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+/* MMHUB */
+#define MMHUB_BASE_INNER(seg) \
+ MMHUB_BASE__INST0_SEG ## seg
+
+#define MMHUB_BASE(seg) \
+ MMHUB_BASE_INNER(seg)
+
+#define MMHUB_SR(reg_name)\
+ .reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \
+ mmMM ## reg_name
+
+/* CLOCK */
+#define CLK_BASE_INNER(seg) \
+ CLK_BASE__INST0_SEG ## seg
+
+#define CLK_BASE(seg) \
+ CLK_BASE_INNER(seg)
+
+#define CLK_SRI(reg_name, block, inst)\
+ .reg_name = CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## _ ## inst ## _ ## reg_name
+
+
+static const struct bios_registers bios_regs = {
+ NBIO_SR(BIOS_SCRATCH_3),
+ NBIO_SR(BIOS_SCRATCH_6)
+};
+
+#define clk_src_regs(index, pllid)\
+[index] = {\
+ CS_COMMON_REG_LIST_DCN2_0(index, pllid),\
+}
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0, A),
+ clk_src_regs(1, B),
+ clk_src_regs(2, C),
+ clk_src_regs(3, D),
+ clk_src_regs(4, E),
+ clk_src_regs(5, F)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
+};
+
+#define abm_regs(id)\
+[id] = {\
+ ABM_DCN301_REG_LIST(id)\
+}
+
+static const struct dce_abm_registers abm_regs[] = {
+ abm_regs(0),
+ abm_regs(1),
+ abm_regs(2),
+ abm_regs(3),
+ abm_regs(4),
+ abm_regs(5),
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCN301(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCN301(_MASK)
+};
+
+
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+ audio_regs(2),
+ audio_regs(3),
+ audio_regs(4),
+ audio_regs(5),
+ audio_regs(6)
+};
+
+#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
+ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
+
+static const struct dce_audio_shift audio_shift = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_audio_mask audio_mask = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define vpg_regs(id)\
+[id] = {\
+ VPG_DCN3_REG_LIST(id)\
+}
+
+static const struct dcn30_vpg_registers vpg_regs[] = {
+ vpg_regs(0),
+ vpg_regs(1),
+ vpg_regs(2),
+ vpg_regs(3),
+ vpg_regs(4),
+ vpg_regs(5),
+ vpg_regs(6),
+};
+
+static const struct dcn30_vpg_shift vpg_shift = {
+ DCN3_VPG_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn30_vpg_mask vpg_mask = {
+ DCN3_VPG_MASK_SH_LIST(_MASK)
+};
+
+#define afmt_regs(id)\
+[id] = {\
+ AFMT_DCN3_REG_LIST(id)\
+}
+
+static const struct dcn30_afmt_registers afmt_regs[] = {
+ afmt_regs(0),
+ afmt_regs(1),
+ afmt_regs(2),
+ afmt_regs(3),
+ afmt_regs(4),
+ afmt_regs(5),
+ afmt_regs(6),
+};
+
+static const struct dcn30_afmt_shift afmt_shift = {
+ DCN3_AFMT_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn30_afmt_mask afmt_mask = {
+ DCN3_AFMT_MASK_SH_LIST(_MASK)
+};
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_DCN3_REG_LIST(id)\
+}
+
+static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1),
+ stream_enc_regs(2),
+ stream_enc_regs(3),
+ stream_enc_regs(4),
+ stream_enc_regs(5)
+};
+
+static const struct dcn10_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
+};
+
+static const struct dcn10_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCN30(_MASK)
+};
+
+
+#define aux_regs(id)\
+[id] = {\
+ DCN2_AUX_REG_LIST(id)\
+}
+
+static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+#define link_regs(id, phyid)\
+[id] = {\
+ LE_DCN3_REG_LIST(id), \
+ UNIPHY_DCN2_REG_LIST(phyid), \
+}
+
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN_AUX_MASK_SH_LIST(_MASK)
+};
+
+static const struct dcn10_link_enc_registers link_enc_regs[] = {
+ link_regs(0, A),
+ link_regs(1, B),
+ link_regs(2, C),
+ link_regs(3, D),
+ link_regs(4, E),
+ link_regs(5, F)
+};
+
+static const struct dcn10_link_enc_shift le_shift = {
+ LINK_ENCODER_MASK_SH_LIST_DCN30(__SHIFT),\
+ DPCS_DCN2_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn10_link_enc_mask le_mask = {
+ LINK_ENCODER_MASK_SH_LIST_DCN30(_MASK),\
+ DPCS_DCN2_MASK_SH_LIST(_MASK)
+};
+
+
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCN_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
+#define dpp_regs(id)\
+[id] = {\
+ DPP_REG_LIST_DCN30(id),\
+}
+
+static const struct dcn3_dpp_registers dpp_regs[] = {
+ dpp_regs(0),
+ dpp_regs(1),
+ dpp_regs(2),
+ dpp_regs(3),
+ dpp_regs(4),
+ dpp_regs(5),
+};
+
+static const struct dcn3_dpp_shift tf_shift = {
+ DPP_REG_LIST_SH_MASK_DCN30(__SHIFT)
+};
+
+static const struct dcn3_dpp_mask tf_mask = {
+ DPP_REG_LIST_SH_MASK_DCN30(_MASK)
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_REG_LIST_DCN30(id),\
+}
+
+static const struct dcn20_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+ opp_regs(2),
+ opp_regs(3),
+ opp_regs(4),
+ opp_regs(5)
+};
+
+static const struct dcn20_opp_shift opp_shift = {
+ OPP_MASK_SH_LIST_DCN20(__SHIFT)
+};
+
+static const struct dcn20_opp_mask opp_mask = {
+ OPP_MASK_SH_LIST_DCN20(_MASK)
+};
+
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST0(id), \
+ .AUXN_IMPCAL = 0, \
+ .AUXP_IMPCAL = 0, \
+ .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
+
+#define dwbc_regs_dcn3(id)\
+[id] = {\
+ DWBC_COMMON_REG_LIST_DCN30(id),\
+}
+
+static const struct dcn30_dwbc_registers dwbc30_regs[] = {
+ dwbc_regs_dcn3(0),
+};
+
+static const struct dcn30_dwbc_shift dwbc30_shift = {
+ DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
+};
+
+static const struct dcn30_dwbc_mask dwbc30_mask = {
+ DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK)
+};
+
+#define mcif_wb_regs_dcn3(id)\
+[id] = {\
+ MCIF_WB_COMMON_REG_LIST_DCN30(id),\
+}
+
+static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = {
+ mcif_wb_regs_dcn3(0)
+};
+
+static const struct dcn30_mmhubbub_shift mcif_wb30_shift = {
+ MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
+};
+
+static const struct dcn30_mmhubbub_mask mcif_wb30_mask = {
+ MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK)
+};
+
+#define dsc_regsDCN20(id)\
+[id] = {\
+ DSC_REG_LIST_DCN20(id)\
+}
+
+static const struct dcn20_dsc_registers dsc_regs[] = {
+ dsc_regsDCN20(0),
+ dsc_regsDCN20(1),
+ dsc_regsDCN20(2),
+ dsc_regsDCN20(3),
+ dsc_regsDCN20(4),
+ dsc_regsDCN20(5)
+};
+
+static const struct dcn20_dsc_shift dsc_shift = {
+ DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
+};
+
+static const struct dcn20_dsc_mask dsc_mask = {
+ DSC_REG_LIST_SH_MASK_DCN20(_MASK)
+};
+
+static const struct dcn30_mpc_registers mpc_regs = {
+ MPC_REG_LIST_DCN3_0(0),
+ MPC_REG_LIST_DCN3_0(1),
+ MPC_REG_LIST_DCN3_0(2),
+ MPC_REG_LIST_DCN3_0(3),
+ MPC_REG_LIST_DCN3_0(4),
+ MPC_REG_LIST_DCN3_0(5),
+ MPC_OUT_MUX_REG_LIST_DCN3_0(0),
+ MPC_OUT_MUX_REG_LIST_DCN3_0(1),
+ MPC_OUT_MUX_REG_LIST_DCN3_0(2),
+ MPC_OUT_MUX_REG_LIST_DCN3_0(3),
+ MPC_OUT_MUX_REG_LIST_DCN3_0(4),
+ MPC_OUT_MUX_REG_LIST_DCN3_0(5),
+ MPC_RMU_GLOBAL_REG_LIST_DCN3AG,
+ MPC_RMU_REG_LIST_DCN3AG(0),
+ MPC_RMU_REG_LIST_DCN3AG(1),
+ MPC_RMU_REG_LIST_DCN3AG(2),
+ MPC_DWB_MUX_REG_LIST_DCN3_0(0),
+};
+
+static const struct dcn30_mpc_shift mpc_shift = {
+ MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
+};
+
+static const struct dcn30_mpc_mask mpc_mask = {
+ MPC_COMMON_MASK_SH_LIST_DCN30(_MASK)
+};
+
+#define optc_regs(id)\
+[id] = {OPTC_COMMON_REG_LIST_DCN3_0(id)}
+
+static const struct dcn_optc_registers optc_regs[] = {
+ optc_regs(0),
+ optc_regs(1),
+ optc_regs(2),
+ optc_regs(3),
+ optc_regs(4),
+ optc_regs(5)
+};
+
+static const struct dcn_optc_shift optc_shift = {
+ OPTC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
+};
+
+static const struct dcn_optc_mask optc_mask = {
+ OPTC_COMMON_MASK_SH_LIST_DCN30(_MASK)
+};
+
+#define hubp_regs(id)\
+[id] = {\
+ HUBP_REG_LIST_DCN30(id)\
+}
+
+static const struct dcn_hubp2_registers hubp_regs[] = {
+ hubp_regs(0),
+ hubp_regs(1),
+ hubp_regs(2),
+ hubp_regs(3),
+ hubp_regs(4),
+ hubp_regs(5)
+};
+
+static const struct dcn_hubp2_shift hubp_shift = {
+ HUBP_MASK_SH_LIST_DCN30(__SHIFT)
+};
+
+static const struct dcn_hubp2_mask hubp_mask = {
+ HUBP_MASK_SH_LIST_DCN30(_MASK)
+};
+
+static const struct dcn_hubbub_registers hubbub_reg = {
+ HUBBUB_REG_LIST_DCN30(0)
+};
+
+static const struct dcn_hubbub_shift hubbub_shift = {
+ HUBBUB_MASK_SH_LIST_DCN30(__SHIFT)
+};
+
+static const struct dcn_hubbub_mask hubbub_mask = {
+ HUBBUB_MASK_SH_LIST_DCN30(_MASK)
+};
+
+static const struct dccg_registers dccg_regs = {
+ DCCG_REG_LIST_DCN30()
+};
+
+static const struct dccg_shift dccg_shift = {
+ DCCG_MASK_SH_LIST_DCN3(__SHIFT)
+};
+
+static const struct dccg_mask dccg_mask = {
+ DCCG_MASK_SH_LIST_DCN3(_MASK)
+};
+
+static const struct dce_hwseq_registers hwseq_reg = {
+ HWSEQ_DCN30_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCN30_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCN30_MASK_SH_LIST(_MASK)
+};
+
+#define vmid_regs(id)\
+[id] = {\
+ DCN20_VMID_REG_LIST(id)\
+}
+
+static const struct dcn_vmid_registers vmid_regs[] = {
+ vmid_regs(0),
+ vmid_regs(1),
+ vmid_regs(2),
+ vmid_regs(3),
+ vmid_regs(4),
+ vmid_regs(5),
+ vmid_regs(6),
+ vmid_regs(7),
+ vmid_regs(8),
+ vmid_regs(9),
+ vmid_regs(10),
+ vmid_regs(11),
+ vmid_regs(12),
+ vmid_regs(13),
+ vmid_regs(14),
+ vmid_regs(15)
+};
+
+static const struct dcn20_vmid_shift vmid_shifts = {
+ DCN20_VMID_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn20_vmid_mask vmid_masks = {
+ DCN20_VMID_MASK_SH_LIST(_MASK)
+};
+
+static const struct resource_caps res_cap_dcn3 = {
+ .num_timing_generator = 6,
+ .num_opp = 6,
+ .num_video_plane = 6,
+ .num_audio = 6,
+ .num_stream_encoder = 6,
+ .num_pll = 6,
+ .num_dwb = 1,
+ .num_ddc = 6,
+ .num_vmid = 16,
+ .num_mpc_3dlut = 3,
+ .num_dsc = 6,
+};
+
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCN_UNIVERSAL,
+ .blends_with_above = true,
+ .blends_with_below = true,
+ .per_pixel_alpha = true,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = true,
+ .fp16 = true,
+ .p010 = false,
+ .ayuv = false,
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 16000,
+ .fp16 = 16000
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 600,
+ .nv12 = 600,
+ .fp16 = 600
+ }
+};
+
+static const struct dc_debug_options debug_defaults_drv = {
+ .disable_dmcu = true,
+ .force_abm_enable = false,
+ .timing_trace = false,
+ .clock_trace = true,
+ .disable_pplib_clock_request = true,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .force_single_disp_pipe_split = false,
+ .disable_dcc = DCC_ENABLE,
+ .vsr_support = true,
+ .performance_trace = false,
+ .max_downscale_src_width = 7680, /* up to 8K */
+ .disable_pplib_wm_range = false,
+ .scl_reset_length10 = true,
+ .sanity_checks = false,
+ .underflow_assert_delay_us = 0xFFFFFFFF,
+ .dwb_fi_phase = -1, // -1 = disable
+ .dmub_command_table = true,
+};
+
+static const struct dc_debug_options debug_defaults_diags = {
+ .disable_dmcu = true,
+ .force_abm_enable = false,
+ .timing_trace = true,
+ .clock_trace = true,
+ .disable_dpp_power_gate = true,
+ .disable_hubp_power_gate = true,
+ .disable_clock_gate = true,
+ .disable_pplib_clock_request = true,
+ .disable_pplib_wm_range = true,
+ .disable_stutter = false,
+ .scl_reset_length10 = true,
+ .dwb_fi_phase = -1, // -1 = disable
+ .dmub_command_table = true,
+};
+
+void dcn30_dpp_destroy(struct dpp **dpp)
+{
+ kfree(TO_DCN20_DPP(*dpp));
+ *dpp = NULL;
+}
+
+struct dpp *dcn30_dpp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn3_dpp *dpp =
+ kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL);
+
+ if (!dpp)
+ return NULL;
+
+ if (dpp3_construct(dpp, ctx, inst,
+ &dpp_regs[inst], &tf_shift, &tf_mask))
+ return &dpp->base;
+
+ BREAK_TO_DEBUGGER();
+ kfree(dpp);
+ return NULL;
+}
+
+struct output_pixel_processor *dcn30_opp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn20_opp *opp =
+ kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
+
+ if (!opp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dcn20_opp_construct(opp, ctx, inst,
+ &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+struct dce_aux *dcn30_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
+
+ return &aux_engine->base;
+}
+
+#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }
+
+static const struct dce_i2c_registers i2c_hw_regs[] = {
+ i2c_inst_regs(1),
+ i2c_inst_regs(2),
+ i2c_inst_regs(3),
+ i2c_inst_regs(4),
+ i2c_inst_regs(5),
+ i2c_inst_regs(6),
+};
+
+static const struct dce_i2c_shift i2c_shifts = {
+ I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT)
+};
+
+static const struct dce_i2c_mask i2c_masks = {
+ I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)
+};
+
+struct dce_i2c_hw *dcn30_i2c_hw_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_i2c_hw *dce_i2c_hw =
+ kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
+
+ if (!dce_i2c_hw)
+ return NULL;
+
+ dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
+ &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
+
+ return dce_i2c_hw;
+}
+
+static struct mpc *dcn30_mpc_create(
+ struct dc_context *ctx,
+ int num_mpcc,
+ int num_rmu)
+{
+ struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc),
+ GFP_KERNEL);
+
+ if (!mpc30)
+ return NULL;
+
+ dcn30_mpc_construct(mpc30, ctx,
+ &mpc_regs,
+ &mpc_shift,
+ &mpc_mask,
+ num_mpcc,
+ num_rmu);
+
+ return &mpc30->base;
+}
+
+struct hubbub *dcn30_hubbub_create(struct dc_context *ctx)
+{
+ int i;
+
+ struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub),
+ GFP_KERNEL);
+
+ if (!hubbub3)
+ return NULL;
+
+ hubbub3_construct(hubbub3, ctx,
+ &hubbub_reg,
+ &hubbub_shift,
+ &hubbub_mask);
+
+ for (i = 0; i < res_cap_dcn3.num_vmid; i++) {
+ struct dcn20_vmid *vmid = &hubbub3->vmid[i];
+
+ vmid->ctx = ctx;
+
+ vmid->regs = &vmid_regs[i];
+ vmid->shifts = &vmid_shifts;
+ vmid->masks = &vmid_masks;
+ }
+
+ return &hubbub3->base;
+}
+
+struct timing_generator *dcn30_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance)
+{
+ struct optc *tgn10 =
+ kzalloc(sizeof(struct optc), GFP_KERNEL);
+
+ if (!tgn10)
+ return NULL;
+
+ tgn10->base.inst = instance;
+ tgn10->base.ctx = ctx;
+
+ tgn10->tg_regs = &optc_regs[instance];
+ tgn10->tg_shift = &optc_shift;
+ tgn10->tg_mask = &optc_mask;
+
+ dcn30_timing_generator_init(tgn10);
+
+ return &tgn10->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+ .hdmi_ycbcr420_supported = true,
+ .dp_ycbcr420_supported = true,
+ .fec_supported = true,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_TPS4_CAPABLE = true
+};
+
+struct link_encoder *dcn30_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dcn20_link_encoder *enc20 =
+ kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
+
+ if (!enc20)
+ return NULL;
+
+ dcn30_link_encoder_construct(enc20,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source],
+ &le_shift,
+ &le_mask);
+
+ return &enc20->enc10.base;
+}
+
+struct panel_cntl *dcn30_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
+ FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
+
+}
+
+static struct audio *dcn30_create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static struct vpg *dcn30_vpg_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL);
+
+ if (!vpg3)
+ return NULL;
+
+ vpg3_construct(vpg3, ctx, inst,
+ &vpg_regs[inst],
+ &vpg_shift,
+ &vpg_mask);
+
+ return &vpg3->base;
+}
+
+static struct afmt *dcn30_afmt_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL);
+
+ if (!afmt3)
+ return NULL;
+
+ afmt3_construct(afmt3, ctx, inst,
+ &afmt_regs[inst],
+ &afmt_shift,
+ &afmt_mask);
+
+ return &afmt3->base;
+}
+
+struct stream_encoder *dcn30_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dcn10_stream_encoder *enc1;
+ struct vpg *vpg;
+ struct afmt *afmt;
+ int vpg_inst;
+ int afmt_inst;
+
+ /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
+ if (eng_id <= ENGINE_ID_DIGF) {
+ vpg_inst = eng_id;
+ afmt_inst = eng_id;
+ } else
+ return NULL;
+
+ enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
+ vpg = dcn30_vpg_create(ctx, vpg_inst);
+ afmt = dcn30_afmt_create(ctx, afmt_inst);
+
+ if (!enc1 || !vpg || !afmt) {
+ /* free whichever allocations succeeded so nothing leaks on error */
+ kfree(enc1);
+ kfree(vpg);
+ kfree(afmt);
+ return NULL;
+ }
+
+ dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
+ eng_id, vpg, afmt,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+
+ return &enc1->base;
+}
+
+struct dce_hwseq *dcn30_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = dcn30_create_audio,
+ .create_stream_encoder = dcn30_stream_encoder_create,
+ .create_hwseq = dcn30_hwseq_create,
+};
+
+static const struct resource_create_funcs res_create_maximus_funcs = {
+ .read_dce_straps = NULL,
+ .create_audio = NULL,
+ .create_stream_encoder = NULL,
+ .create_hwseq = dcn30_hwseq_create,
+};
+
+static void dcn30_resource_destruct(struct dcn30_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL) {
+ if (pool->base.stream_enc[i]->vpg != NULL) {
+ kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg));
+ pool->base.stream_enc[i]->vpg = NULL;
+ }
+ if (pool->base.stream_enc[i]->afmt != NULL) {
+ kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt));
+ pool->base.stream_enc[i]->afmt = NULL;
+ }
+ kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ pool->base.stream_enc[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
+ if (pool->base.dscs[i] != NULL)
+ dcn20_dsc_destroy(&pool->base.dscs[i]);
+ }
+
+ if (pool->base.mpc != NULL) {
+ kfree(TO_DCN20_MPC(pool->base.mpc));
+ pool->base.mpc = NULL;
+ }
+ if (pool->base.hubbub != NULL) {
+ kfree(pool->base.hubbub);
+ pool->base.hubbub = NULL;
+ }
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.dpps[i] != NULL)
+ dcn30_dpp_destroy(&pool->base.dpps[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.hubps[i] != NULL) {
+ kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
+ pool->base.hubps[i] = NULL;
+ }
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+ if (pool->base.hw_i2cs[i] != NULL) {
+ kfree(pool->base.hw_i2cs[i]);
+ pool->base.hw_i2cs[i] = NULL;
+ }
+ if (pool->base.sw_i2cs[i] != NULL) {
+ kfree(pool->base.sw_i2cs[i]);
+ pool->base.sw_i2cs[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_opp; i++) {
+ if (pool->base.opps[i] != NULL)
+ pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
+ if (pool->base.dwbc[i] != NULL) {
+ kfree(TO_DCN30_DWBC(pool->base.dwbc[i]));
+ pool->base.dwbc[i] = NULL;
+ }
+ if (pool->base.mcif_wb[i] != NULL) {
+ kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i]));
+ pool->base.mcif_wb[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i])
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL) {
+ dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
+ pool->base.clock_sources[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) {
+ if (pool->base.mpc_lut[i] != NULL) {
+ dc_3dlut_func_release(pool->base.mpc_lut[i]);
+ pool->base.mpc_lut[i] = NULL;
+ }
+ if (pool->base.mpc_shaper[i] != NULL) {
+ dc_transfer_func_release(pool->base.mpc_shaper[i]);
+ pool->base.mpc_shaper[i] = NULL;
+ }
+ }
+
+ if (pool->base.dp_clock_source != NULL) {
+ dcn20_clock_source_destroy(&pool->base.dp_clock_source);
+ pool->base.dp_clock_source = NULL;
+ }
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.multiple_abms[i] != NULL)
+ dce_abm_destroy(&pool->base.multiple_abms[i]);
+ }
+
+ if (pool->base.dccg != NULL)
+ dcn_dccg_destroy(&pool->base.dccg);
+}
+
+struct hubp *dcn30_hubp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn20_hubp *hubp2 =
+ kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
+
+ if (!hubp2)
+ return NULL;
+
+ if (hubp3_construct(hubp2, ctx, inst,
+ &hubp_regs[inst], &hubp_shift, &hubp_mask))
+ return &hubp2->base;
+
+ BREAK_TO_DEBUGGER();
+ kfree(hubp2);
+ return NULL;
+}
+
+bool dcn30_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
+{
+ int i;
+ uint32_t pipe_count = pool->res_cap->num_dwb;
+
+ for (i = 0; i < pipe_count; i++) {
+ struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc),
+ GFP_KERNEL);
+
+ if (!dwbc30) {
+ dm_error("DC: failed to create dwbc30!\n");
+ return false;
+ }
+
+ dcn30_dwbc_construct(dwbc30, ctx,
+ &dwbc30_regs[i],
+ &dwbc30_shift,
+ &dwbc30_mask,
+ i);
+
+ pool->dwbc[i] = &dwbc30->base;
+ }
+ return true;
+}
+
+bool dcn30_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
+{
+ int i;
+ uint32_t pipe_count = pool->res_cap->num_dwb;
+
+ for (i = 0; i < pipe_count; i++) {
+ struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub),
+ GFP_KERNEL);
+
+ if (!mcif_wb30) {
+ dm_error("DC: failed to create mcif_wb30!\n");
+ return false;
+ }
+
+ dcn30_mmhubbub_construct(mcif_wb30, ctx,
+ &mcif_wb30_regs[i],
+ &mcif_wb30_shift,
+ &mcif_wb30_mask,
+ i);
+
+ pool->mcif_wb[i] = &mcif_wb30->base;
+ }
+ return true;
+}
+
+static struct display_stream_compressor *dcn30_dsc_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn20_dsc *dsc =
+ kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
+
+ if (!dsc) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
+ return &dsc->base;
+}
+
+enum dc_status dcn30_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
+{
+ return dcn20_add_stream_to_ctx(dc, new_ctx, dc_stream);
+}
+
+static void dcn30_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dcn30_resource_pool *dcn30_pool = TO_DCN30_RES_POOL(*pool);
+
+ dcn30_resource_destruct(dcn30_pool);
+ kfree(dcn30_pool);
+ *pool = NULL;
+}
+
+static struct clock_source *dcn30_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dcn3_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+int dcn30_populate_dml_pipes_from_context(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes)
+{
+ int i, pipe_cnt;
+ struct resource_context *res_ctx = &context->res_ctx;
+
+ dcn20_populate_dml_pipes_from_context(dc, context, pipes);
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+
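+ /* Report the 16-bit line buffer depth (dm_lb_16) to DML for every
+ * active pipe.
+ */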
+ pipes[pipe_cnt++].pipe.scale_ratio_depth.lb_depth =
+ dm_lb_16;
+ }
+
+ return pipe_cnt;
+}
+
+void dcn30_populate_dml_writeback_from_context(
+ struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
+{
+ int pipe_cnt, i, j;
+ double max_calc_writeback_dispclk;
+ double writeback_dispclk;
+ struct writeback_st dout_wb;
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_stream_state *stream = res_ctx->pipe_ctx[i].stream;
+
+ if (!stream)
+ continue;
+ max_calc_writeback_dispclk = 0;
+
+ /* Set writeback information */
+ pipes[pipe_cnt].dout.wb_enable = 0;
+ pipes[pipe_cnt].dout.num_active_wb = 0;
+ for (j = 0; j < stream->num_wb_info; j++) {
+ struct dc_writeback_info *wb_info = &stream->writeback_info[j];
+
+ if (wb_info->wb_enabled && wb_info->writeback_source_plane &&
+ (wb_info->writeback_source_plane == res_ctx->pipe_ctx[i].plane_state)) {
+ pipes[pipe_cnt].dout.wb_enable = 1;
+ pipes[pipe_cnt].dout.num_active_wb++;
+ dout_wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_en ?
+ wb_info->dwb_params.cnv_params.crop_height :
+ wb_info->dwb_params.cnv_params.src_height;
+ dout_wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_en ?
+ wb_info->dwb_params.cnv_params.crop_width :
+ wb_info->dwb_params.cnv_params.src_width;
+ dout_wb.wb_dst_width = wb_info->dwb_params.dest_width;
+ dout_wb.wb_dst_height = wb_info->dwb_params.dest_height;
+
+ /* For IP that doesn't support WB scaling, set h/v taps to 1 to avoid DML validation failure */
+ if (dc->dml.ip.writeback_max_hscl_taps > 1) {
+ dout_wb.wb_htaps_luma = wb_info->dwb_params.scaler_taps.h_taps;
+ dout_wb.wb_vtaps_luma = wb_info->dwb_params.scaler_taps.v_taps;
+ } else {
+ dout_wb.wb_htaps_luma = 1;
+ dout_wb.wb_vtaps_luma = 1;
+ }
+ dout_wb.wb_htaps_chroma = 0;
+ dout_wb.wb_vtaps_chroma = 0;
+ dout_wb.wb_hratio = wb_info->dwb_params.cnv_params.crop_en ?
+ (double)wb_info->dwb_params.cnv_params.crop_width /
+ (double)wb_info->dwb_params.dest_width :
+ (double)wb_info->dwb_params.cnv_params.src_width /
+ (double)wb_info->dwb_params.dest_width;
+ dout_wb.wb_vratio = wb_info->dwb_params.cnv_params.crop_en ?
+ (double)wb_info->dwb_params.cnv_params.crop_height /
+ (double)wb_info->dwb_params.dest_height :
+ (double)wb_info->dwb_params.cnv_params.src_height /
+ (double)wb_info->dwb_params.dest_height;
+ if (wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_ARGB ||
+ wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_RGBA)
+ dout_wb.wb_pixel_format = dm_444_64;
+ else
+ dout_wb.wb_pixel_format = dm_444_32;
+
+ /* Workaround for cases where multiple writebacks are connected to same plane
+ * In which case, need to compute worst case and set the associated writeback parameters
+ * This workaround is necessary due to DML computation assuming only 1 set of writeback
+ * parameters per pipe
+ */
+ writeback_dispclk = dml30_CalculateWriteBackDISPCLK(
+ dout_wb.wb_pixel_format,
+ pipes[pipe_cnt].pipe.dest.pixel_rate_mhz,
+ dout_wb.wb_hratio,
+ dout_wb.wb_vratio,
+ dout_wb.wb_htaps_luma,
+ dout_wb.wb_vtaps_luma,
+ dout_wb.wb_src_width,
+ dout_wb.wb_dst_width,
+ pipes[pipe_cnt].pipe.dest.htotal,
+ dc->current_state->bw_ctx.dml.ip.writeback_line_buffer_buffer_size);
+
+ if (writeback_dispclk > max_calc_writeback_dispclk) {
+ max_calc_writeback_dispclk = writeback_dispclk;
+ pipes[pipe_cnt].dout.wb = dout_wb;
+ }
+ }
+ }
+
+ pipe_cnt++;
+ }
+}
+
+unsigned int dcn30_calc_max_scaled_time(
+ unsigned int time_per_pixel,
+ enum mmhubbub_wbif_mode mode,
+ unsigned int urgent_watermark)
+{
+ unsigned int time_per_byte = 0;
+ unsigned int total_free_entry = 0xb40;
+ unsigned int buf_lh_capability;
+ unsigned int max_scaled_time;
+
+ if (mode == PACKED_444) /* packed mode 32 bpp */
+ time_per_byte = time_per_pixel/4;
+ else if (mode == PACKED_444_FP16) /* packed mode 64 bpp */
+ time_per_byte = time_per_pixel/8;
+
+ if (time_per_byte == 0)
+ time_per_byte = 1;
+
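+ /* Buffer line-hold capability in time units: free entries (presumably
+ * 32 bytes each) * time_per_byte in u6.6 fixed point; >> 6 drops the
+ * fractional bits.
+ */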
+ buf_lh_capability = (total_free_entry*time_per_byte*32) >> 6; /* time_per_byte is in u6.6*/
+ max_scaled_time = buf_lh_capability - urgent_watermark;
+ return max_scaled_time;
+}
+
+void dcn30_set_mcif_arb_params(
+ struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt)
+{
+ enum mmhubbub_wbif_mode wbif_mode;
+ struct display_mode_lib *dml = &context->bw_ctx.dml;
+ struct mcif_arb_params *wb_arb_params;
+ int i, j, k, dwb_pipe;
+
+ /* Writeback MCIF_WB arbitration parameters */
+ dwb_pipe = 0;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ for (j = 0; j < MAX_DWB_PIPES; j++) {
+ struct dc_writeback_info *writeback_info = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j];
+
+ if (writeback_info->wb_enabled == false)
+ continue;
+
+ //wb_arb_params = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j].mcif_arb_params;
+ wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[dwb_pipe];
+
+ if (writeback_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_ARGB ||
+ writeback_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_RGBA)
+ wbif_mode = PACKED_444_FP16;
+ else
+ wbif_mode = PACKED_444;
+
+ for (k = 0; k < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); k++) {
+ wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(dml, pipes, pipe_cnt) * 1000;
+ wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
+ }
+ wb_arb_params->time_per_pixel = (1000000 << 6) / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* time_per_pixel should be in u6.6 format */
+ wb_arb_params->slice_lines = 32;
+ wb_arb_params->arbitration_slice = 2; /* irrelevant since there is no YUV output */
+ wb_arb_params->max_scaled_time = dcn30_calc_max_scaled_time(wb_arb_params->time_per_pixel,
+ wbif_mode,
+ wb_arb_params->cli_watermark[0]); /* assume 4 watermark sets have the same value */
+ wb_arb_params->dram_speed_change_duration = dml->vba.WritebackAllowDRAMClockChangeEndPosition[j] * pipes[0].clks_cfg.refclk_mhz; /* num_clock_cycles = us * MHz */
+
+ dwb_pipe++;
+
+ if (dwb_pipe >= MAX_DWB_PIPES)
+ return;
+ }
+ if (dwb_pipe >= MAX_DWB_PIPES)
+ return;
+ }
+}
+
+static struct dc_cap_funcs cap_funcs = {
+ .get_dcc_compression_cap = dcn20_get_dcc_compression_cap
+};
+
+bool dcn30_acquire_post_bldn_3dlut(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ int mpcc_id,
+ struct dc_3dlut **lut,
+ struct dc_transfer_func **shaper)
+{
+ int i;
+ bool ret = false;
+ union dc_3dlut_state *state;
+
+ ASSERT(*lut == NULL && *shaper == NULL);
+ *lut = NULL;
+ *shaper = NULL;
+
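+ /* Claim the first free shared RMU 3D LUT/shaper pair and record which
+ * MPCC feeds it via the per-RMU mux state bits.
+ */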
+ for (i = 0; i < pool->res_cap->num_mpc_3dlut; i++) {
+ if (!res_ctx->is_mpc_3dlut_acquired[i]) {
+ *lut = pool->mpc_lut[i];
+ *shaper = pool->mpc_shaper[i];
+ state = &pool->mpc_lut[i]->state;
+ res_ctx->is_mpc_3dlut_acquired[i] = true;
+ state->bits.rmu_idx_valid = 1;
+ state->bits.rmu_mux_num = i;
+ if (state->bits.rmu_mux_num == 0)
+ state->bits.mpc_rmu0_mux = mpcc_id;
+ else if (state->bits.rmu_mux_num == 1)
+ state->bits.mpc_rmu1_mux = mpcc_id;
+ else if (state->bits.rmu_mux_num == 2)
+ state->bits.mpc_rmu2_mux = mpcc_id;
+ ret = true;
+ break;
+ }
+ }
+ return ret;
+}
+
+bool dcn30_release_post_bldn_3dlut(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct dc_3dlut **lut,
+ struct dc_transfer_func **shaper)
+{
+ int i;
+ bool ret = false;
+
+ for (i = 0; i < pool->res_cap->num_mpc_3dlut; i++) {
+ if (pool->mpc_lut[i] == *lut && pool->mpc_shaper[i] == *shaper) {
+ res_ctx->is_mpc_3dlut_acquired[i] = false;
+ pool->mpc_lut[i]->state.raw = 0;
+ *lut = NULL;
+ *shaper = NULL;
+ ret = true;
+ break;
+ }
+ }
+ return ret;
+}
+
+#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16)))
+#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
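+
+/* The gpu_info soc bounding box stores most fields as little-endian fixed
+ * point with 16 fractional bits; convert to host order, then to double for
+ * DML consumption.
+ */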
+
+static bool is_soc_bounding_box_valid(struct dc *dc)
+{
+ uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;
+
+ if (ASICREV_IS_SIENNA_CICHLID_P(hw_internal_rev))
+ return true;
+
+ return false;
+}
+
+static bool init_soc_bounding_box(struct dc *dc,
+ struct dcn30_resource_pool *pool)
+{
+ const struct gpu_info_soc_bounding_box_v1_0 *bb = dc->soc_bounding_box;
+ struct _vcs_dpi_soc_bounding_box_st *loaded_bb = &dcn3_0_soc;
+ struct _vcs_dpi_ip_params_st *loaded_ip = &dcn3_0_ip;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ if (!bb && !is_soc_bounding_box_valid(dc)) {
+ DC_LOG_ERROR("%s: invalid soc bounding box\n", __func__);
+ return false;
+ }
+
+ if (bb && !is_soc_bounding_box_valid(dc)) {
+ int i;
+
+ dcn3_0_soc.sr_exit_time_us =
+ fixed16_to_double_to_cpu(bb->sr_exit_time_us);
+ dcn3_0_soc.sr_enter_plus_exit_time_us =
+ fixed16_to_double_to_cpu(bb->sr_enter_plus_exit_time_us);
+ dcn3_0_soc.urgent_latency_us =
+ fixed16_to_double_to_cpu(bb->urgent_latency_us);
+ dcn3_0_soc.urgent_latency_pixel_data_only_us =
+ fixed16_to_double_to_cpu(bb->urgent_latency_pixel_data_only_us);
+ dcn3_0_soc.urgent_latency_pixel_mixed_with_vm_data_us =
+ fixed16_to_double_to_cpu(bb->urgent_latency_pixel_mixed_with_vm_data_us);
+ dcn3_0_soc.urgent_latency_vm_data_only_us =
+ fixed16_to_double_to_cpu(bb->urgent_latency_vm_data_only_us);
+ dcn3_0_soc.urgent_out_of_order_return_per_channel_pixel_only_bytes =
+ le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_only_bytes);
+ dcn3_0_soc.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes =
+ le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes);
+ dcn3_0_soc.urgent_out_of_order_return_per_channel_vm_only_bytes =
+ le32_to_cpu(bb->urgent_out_of_order_return_per_channel_vm_only_bytes);
+ dcn3_0_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only =
+ fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_only);
+ dcn3_0_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm =
+ fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm);
+ dcn3_0_soc.pct_ideal_dram_sdp_bw_after_urgent_vm_only =
+ fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_vm_only);
+ dcn3_0_soc.max_avg_sdp_bw_use_normal_percent =
+ fixed16_to_double_to_cpu(bb->max_avg_sdp_bw_use_normal_percent);
+ dcn3_0_soc.max_avg_dram_bw_use_normal_percent =
+ fixed16_to_double_to_cpu(bb->max_avg_dram_bw_use_normal_percent);
+ dcn3_0_soc.writeback_latency_us =
+ fixed16_to_double_to_cpu(bb->writeback_latency_us);
+ dcn3_0_soc.ideal_dram_bw_after_urgent_percent =
+ fixed16_to_double_to_cpu(bb->ideal_dram_bw_after_urgent_percent);
+ dcn3_0_soc.max_request_size_bytes =
+ le32_to_cpu(bb->max_request_size_bytes);
+ dcn3_0_soc.dram_channel_width_bytes =
+ le32_to_cpu(bb->dram_channel_width_bytes);
+ dcn3_0_soc.fabric_datapath_to_dcn_data_return_bytes =
+ le32_to_cpu(bb->fabric_datapath_to_dcn_data_return_bytes);
+ dcn3_0_soc.dcn_downspread_percent =
+ fixed16_to_double_to_cpu(bb->dcn_downspread_percent);
+ dcn3_0_soc.downspread_percent =
+ fixed16_to_double_to_cpu(bb->downspread_percent);
+ dcn3_0_soc.dram_page_open_time_ns =
+ fixed16_to_double_to_cpu(bb->dram_page_open_time_ns);
+ dcn3_0_soc.dram_rw_turnaround_time_ns =
+ fixed16_to_double_to_cpu(bb->dram_rw_turnaround_time_ns);
+ dcn3_0_soc.dram_return_buffer_per_channel_bytes =
+ le32_to_cpu(bb->dram_return_buffer_per_channel_bytes);
+ dcn3_0_soc.round_trip_ping_latency_dcfclk_cycles =
+ le32_to_cpu(bb->round_trip_ping_latency_dcfclk_cycles);
+ dcn3_0_soc.urgent_out_of_order_return_per_channel_bytes =
+ le32_to_cpu(bb->urgent_out_of_order_return_per_channel_bytes);
+ dcn3_0_soc.channel_interleave_bytes =
+ le32_to_cpu(bb->channel_interleave_bytes);
+ dcn3_0_soc.num_banks =
+ le32_to_cpu(bb->num_banks);
+ dcn3_0_soc.num_chans =
+ le32_to_cpu(bb->num_chans);
+ dcn3_0_soc.gpuvm_min_page_size_bytes =
+ le32_to_cpu(bb->vmm_page_size_bytes);
+ dcn3_0_soc.dram_clock_change_latency_us =
+ fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us);
+ dcn3_0_soc.writeback_dram_clock_change_latency_us =
+ fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us);
+ dcn3_0_soc.return_bus_width_bytes =
+ le32_to_cpu(bb->return_bus_width_bytes);
+ dcn3_0_soc.dispclk_dppclk_vco_speed_mhz =
+ le32_to_cpu(bb->dispclk_dppclk_vco_speed_mhz);
+ dcn3_0_soc.xfc_bus_transport_time_us =
+ le32_to_cpu(bb->xfc_bus_transport_time_us);
+ dcn3_0_soc.xfc_xbuf_latency_tolerance_us =
+ le32_to_cpu(bb->xfc_xbuf_latency_tolerance_us);
+ dcn3_0_soc.use_urgent_burst_bw =
+ le32_to_cpu(bb->use_urgent_burst_bw);
+ dcn3_0_soc.num_states =
+ le32_to_cpu(bb->num_states);
+
+ for (i = 0; i < dcn3_0_soc.num_states; i++) {
+ dcn3_0_soc.clock_limits[i].state =
+ le32_to_cpu(bb->clock_limits[i].state);
+ dcn3_0_soc.clock_limits[i].dcfclk_mhz =
+ fixed16_to_double_to_cpu(bb->clock_limits[i].dcfclk_mhz);
+ dcn3_0_soc.clock_limits[i].fabricclk_mhz =
+ fixed16_to_double_to_cpu(bb->clock_limits[i].fabricclk_mhz);
+ dcn3_0_soc.clock_limits[i].dispclk_mhz =
+ fixed16_to_double_to_cpu(bb->clock_limits[i].dispclk_mhz);
+ dcn3_0_soc.clock_limits[i].dppclk_mhz =
+ fixed16_to_double_to_cpu(bb->clock_limits[i].dppclk_mhz);
+ dcn3_0_soc.clock_limits[i].phyclk_mhz =
+ fixed16_to_double_to_cpu(bb->clock_limits[i].phyclk_mhz);
+ dcn3_0_soc.clock_limits[i].socclk_mhz =
+ fixed16_to_double_to_cpu(bb->clock_limits[i].socclk_mhz);
+ dcn3_0_soc.clock_limits[i].dscclk_mhz =
+ fixed16_to_double_to_cpu(bb->clock_limits[i].dscclk_mhz);
+ dcn3_0_soc.clock_limits[i].dram_speed_mts =
+ fixed16_to_double_to_cpu(bb->clock_limits[i].dram_speed_mts);
+ }
+ }
+
+ loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
+ loaded_ip->max_num_dpp = pool->base.pipe_count;
+ loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk;
+ dcn20_patch_bounding_box(dc, loaded_bb);
+ return true;
+}
+
+static bool dcn30_split_stream_for_mpc_or_odm(
+ const struct dc *dc,
+ struct resource_context *res_ctx,
+ struct pipe_ctx *pri_pipe,
+ struct pipe_ctx *sec_pipe,
+ bool odm)
+{
+ int pipe_idx = sec_pipe->pipe_idx;
+ const struct resource_pool *pool = dc->res_pool;
+
+ *sec_pipe = *pri_pipe;
+
+ sec_pipe->pipe_idx = pipe_idx;
+ sec_pipe->plane_res.mi = pool->mis[pipe_idx];
+ sec_pipe->plane_res.hubp = pool->hubps[pipe_idx];
+ sec_pipe->plane_res.ipp = pool->ipps[pipe_idx];
+ sec_pipe->plane_res.xfm = pool->transforms[pipe_idx];
+ sec_pipe->plane_res.dpp = pool->dpps[pipe_idx];
+ sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
+ sec_pipe->stream_res.dsc = NULL;
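+ /* Link the secondary pipe either into the ODM chain (one stream split
+ * across OPPs) or the MPC blend tree (stacked planes), mirroring the
+ * primary pipe's existing downstream links.
+ */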
+ if (odm) {
+ if (pri_pipe->next_odm_pipe) {
+ ASSERT(pri_pipe->next_odm_pipe != sec_pipe);
+ sec_pipe->next_odm_pipe = pri_pipe->next_odm_pipe;
+ sec_pipe->next_odm_pipe->prev_odm_pipe = sec_pipe;
+ }
+ pri_pipe->next_odm_pipe = sec_pipe;
+ sec_pipe->prev_odm_pipe = pri_pipe;
+ ASSERT(sec_pipe->top_pipe == NULL);
+
+ sec_pipe->stream_res.opp = pool->opps[pipe_idx];
+ if (sec_pipe->stream->timing.flags.DSC == 1) {
+ dcn20_acquire_dsc(dc, res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
+ ASSERT(sec_pipe->stream_res.dsc);
+ if (sec_pipe->stream_res.dsc == NULL)
+ return false;
+ }
+ } else {
+ if (pri_pipe->bottom_pipe) {
+ ASSERT(pri_pipe->bottom_pipe != sec_pipe);
+ sec_pipe->bottom_pipe = pri_pipe->bottom_pipe;
+ sec_pipe->bottom_pipe->top_pipe = sec_pipe;
+ }
+ pri_pipe->bottom_pipe = sec_pipe;
+ sec_pipe->top_pipe = pri_pipe;
+
+ ASSERT(pri_pipe->plane_state);
+ }
+
+ return true;
+}
+
+static bool dcn30_fast_validate_bw(
+ struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int *pipe_cnt_out,
+ int *vlevel_out)
+{
+ bool out = false;
+ bool repopulate_pipes = false;
+ int split[MAX_PIPES] = { 0 };
+ bool merge[MAX_PIPES] = { false };
+ bool newly_split[MAX_PIPES] = { false };
+ int pipe_cnt, i, pipe_idx, vlevel;
+ struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+
+ ASSERT(pipes);
+ if (!pipes)
+ return false;
+
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes);
+
+ if (!pipe_cnt) {
+ out = true;
+ goto validate_out;
+ }
+
+ dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
+
+ vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
+
+ dml_log_mode_support_params(&context->bw_ctx.dml);
+
+ /* TODO: Check why the calculated vlevel fails validation for the resolutions below */
+ if (context->res_ctx.pipe_ctx[0].stream != NULL) {
+ if (context->res_ctx.pipe_ctx[0].stream->timing.h_addressable == 640 && context->res_ctx.pipe_ctx[0].stream->timing.v_addressable == 480)
+ vlevel = 0;
+ if (context->res_ctx.pipe_ctx[0].stream->timing.h_addressable == 1280 && context->res_ctx.pipe_ctx[0].stream->timing.v_addressable == 800)
+ vlevel = 0;
+ if (context->res_ctx.pipe_ctx[0].stream->timing.h_addressable == 1280 && context->res_ctx.pipe_ctx[0].stream->timing.v_addressable == 768)
+ vlevel = 0;
+ if (context->res_ctx.pipe_ctx[0].stream->timing.h_addressable == 1280 && context->res_ctx.pipe_ctx[0].stream->timing.v_addressable == 1024)
+ vlevel = 0;
+ if (context->res_ctx.pipe_ctx[0].stream->timing.h_addressable == 2048 && context->res_ctx.pipe_ctx[0].stream->timing.v_addressable == 1536)
+ vlevel = 0;
+ }
+
+ if (vlevel == context->bw_ctx.dml.soc.num_states)
+ goto validate_fail;
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
+
+ if (!pipe->stream)
+ continue;
+
+ /* We only support full screen mpo with ODM */
+ if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
+ && pipe->plane_state && mpo_pipe
+ && memcmp(&mpo_pipe->plane_res.scl_data.recout,
+ &pipe->plane_res.scl_data.recout,
+ sizeof(struct rect)) != 0) {
+ ASSERT(mpo_pipe->plane_state != pipe->plane_state);
+ goto validate_fail;
+ }
+ pipe_idx++;
+ }
+
+ vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
+
+ /* merge pipes if necessary */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ /* skip pipes that don't need merging */
+ if (!merge[i])
+ continue;
+
+ /* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */
+ if (pipe->prev_odm_pipe) {
+ /* split off ODM pipe */
+ pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
+ if (pipe->next_odm_pipe)
+ pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;
+
+ pipe->bottom_pipe = NULL;
+ pipe->next_odm_pipe = NULL;
+ pipe->plane_state = NULL;
+ pipe->stream = NULL;
+ pipe->top_pipe = NULL;
+ pipe->prev_odm_pipe = NULL;
+ if (pipe->stream_res.dsc)
+ dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
+ memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
+ memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
+ } else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
+ struct pipe_ctx *top_pipe = pipe->top_pipe;
+ struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;
+
+ top_pipe->bottom_pipe = bottom_pipe;
+ if (bottom_pipe)
+ bottom_pipe->top_pipe = top_pipe;
+
+ pipe->top_pipe = NULL;
+ pipe->bottom_pipe = NULL;
+ pipe->plane_state = NULL;
+ pipe->stream = NULL;
+ memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
+ memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
+ } else
+ ASSERT(0); /* Should never try to merge master pipe */
+
+ }
+
+ for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *hsplit_pipe = NULL;
+ bool odm;
+
+ if (!pipe->stream || newly_split[i])
+ continue;
+
+ pipe_idx++;
+ odm = vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled;
+
+ if (!pipe->plane_state && !odm)
+ continue;
+
+ if (split[i]) {
+ hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
+ ASSERT(hsplit_pipe);
+ if (!hsplit_pipe)
+ goto validate_fail;
+
+ if (!dcn30_split_stream_for_mpc_or_odm(
+ dc, &context->res_ctx,
+ pipe, hsplit_pipe, odm))
+ goto validate_fail;
+
+ newly_split[hsplit_pipe->pipe_idx] = true;
+ repopulate_pipes = true;
+ }
+ if (split[i] == 4) {
+ struct pipe_ctx *pipe_4to1 = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
+
+ ASSERT(pipe_4to1);
+ if (!pipe_4to1)
+ goto validate_fail;
+ if (!dcn30_split_stream_for_mpc_or_odm(
+ dc, &context->res_ctx,
+ pipe, pipe_4to1, odm))
+ goto validate_fail;
+ newly_split[pipe_4to1->pipe_idx] = true;
+
+ pipe_4to1 = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
+ ASSERT(pipe_4to1);
+ if (!pipe_4to1)
+ goto validate_fail;
+ if (!dcn30_split_stream_for_mpc_or_odm(
+ dc, &context->res_ctx,
+ hsplit_pipe, pipe_4to1, odm))
+ goto validate_fail;
+ newly_split[pipe_4to1->pipe_idx] = true;
+ }
+ if (odm)
+ dcn20_build_mapped_resource(dc, context, pipe->stream);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state) {
+ if (!resource_build_scaling_params(pipe))
+ goto validate_fail;
+ }
+ }
+
+ /* Validate the actual DSC count available against each stream's DSC needs */
+ if (!dcn20_validate_dsc(dc, context)) {
+ vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE;
+ goto validate_fail;
+ }
+
+ if (repopulate_pipes)
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes);
+ *vlevel_out = vlevel;
+ *pipe_cnt_out = pipe_cnt;
+
+ out = true;
+ goto validate_out;
+
+validate_fail:
+ out = false;
+
+validate_out:
+ return out;
+}
+
+static void dcn30_calculate_wm(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel)
+{
+ int i, pipe_idx;
+ double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+
+ if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
+ dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
+
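+ /* Seed pipe 0's clock config with the validated voltage level; the
+ * get_wm_* helpers below read DML watermark results at these clocks.
+ */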
+ pipes[0].clks_cfg.voltage = vlevel;
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
+ pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
+
+ /* Set B:
+ * DCFCLK: 1GHz or min required above 1GHz
+ * FCLK/UCLK: Max
+ */
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
+ if (vlevel == 0) {
+ pipes[0].clks_cfg.voltage = 1;
+ pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
+ }
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ pipes[0].clks_cfg.voltage = vlevel;
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
+
+ /* Set C:
+ * DCFCLK: Min Required
+ * FCLK(proportional to UCLK): 1GHz or Max
+ * pstate latency overridden to 5us
+ */
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ /* Set D:
+ * DCFCLK: Min Required
+ * FCLK(proportional to UCLK): 1GHz or Max
+ * sr_enter_exit = 4, sr_exit = 2us
+ */
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ /* Set A:
+ * DCFCLK: Min Required
+ * FCLK(proportional to UCLK): 1GHz or Max
+ *
+ * Set A calculated last so that following calculations are based on Set A
+ */
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
+ if (dc->config.forced_clocks) {
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
+ }
+ if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
+ if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
+
+ pipe_idx++;
+ }
+}
+
+bool dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context,
+ bool fast_validate)
+{
+ bool out = false;
+
+ BW_VAL_TRACE_SETUP();
+
+ int vlevel = 0;
+ int pipe_cnt = 0;
+ display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ BW_VAL_TRACE_COUNT();
+
+ out = dcn30_fast_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel);
+
+ if (pipe_cnt == 0)
+ goto validate_out;
+
+ if (!out)
+ goto validate_fail;
+
+ BW_VAL_TRACE_END_VOLTAGE_LEVEL();
+
+ if (fast_validate) {
+ BW_VAL_TRACE_SKIP(fast);
+ goto validate_out;
+ }
+
+ dcn30_calculate_wm(dc, context, pipes, pipe_cnt, vlevel);
+ dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+
+ BW_VAL_TRACE_END_WATERMARKS();
+
+ goto validate_out;
+
+validate_fail:
+ DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
+ dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
+
+ BW_VAL_TRACE_SKIP(fail);
+ out = false;
+
+validate_out:
+ kfree(pipes);
+
+ BW_VAL_TRACE_FINISH();
+
+ return out;
+}
+
+static void get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
+ unsigned int *optimal_dcfclk,
+ unsigned int *optimal_fclk)
+{
+ double bw_from_dram, bw_from_dram1, bw_from_dram2;
+
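+ /* Usable DRAM bandwidth under each constraint: uclk (MT/s) * channels *
+ * bytes per channel * allowed utilization; the smaller of the DRAM- and
+ * SDP-limited figures then bounds the optimal FCLK and DCFCLK below.
+ */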
+ bw_from_dram1 = uclk_mts * dcn3_0_soc.num_chans *
+ dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_dram_bw_use_normal_percent / 100);
+ bw_from_dram2 = uclk_mts * dcn3_0_soc.num_chans *
+ dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100);
+
+ bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
+
+ if (optimal_fclk)
+ *optimal_fclk = bw_from_dram /
+ (dcn3_0_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
+
+ if (optimal_dcfclk)
+ *optimal_dcfclk = bw_from_dram /
+ (dcn3_0_soc.return_bus_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
+}
+
+static void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+{
+ unsigned int i, j;
+ unsigned int num_states = 0;
+
+ unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0};
+ unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0};
+ unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0};
+ unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0};
+
+ unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {694, 875, 1000, 1200};
+ unsigned int num_dcfclk_sta_targets = 4;
+ unsigned int num_uclk_states;
+
+ if (dc->ctx->dc_bios->vram_info.num_chans)
+ dcn3_0_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
+
+ if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
+ dcn3_0_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+
+ dcn3_0_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+
+ if (bw_params->clk_table.entries[0].memclk_mhz) {
+
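+ /* Merge the fixed DCFCLK STA targets with the reported clock table into
+ * a combined DCFCLK/UCLK state list, then rewrite the SoC clock limits
+ * and re-initialize DML with the updated bounding box.
+ */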
+ if (bw_params->clk_table.entries[1].dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
+ // If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array
+ dcfclk_sta_targets[num_dcfclk_sta_targets] = bw_params->clk_table.entries[1].dcfclk_mhz;
+ num_dcfclk_sta_targets++;
+ } else if (bw_params->clk_table.entries[1].dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
+ // If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates
+ for (i = 0; i < num_dcfclk_sta_targets; i++) {
+ if (dcfclk_sta_targets[i] > bw_params->clk_table.entries[1].dcfclk_mhz) {
+ dcfclk_sta_targets[i] = bw_params->clk_table.entries[1].dcfclk_mhz;
+ break;
+ }
+ }
+ // Update size of array since we "removed" duplicates
+ num_dcfclk_sta_targets = i + 1;
+ }
+
+ num_uclk_states = bw_params->clk_table.num_entries;
+
+ // Calculate optimal dcfclk for each uclk
+ for (i = 0; i < num_uclk_states; i++) {
+ get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
+ &optimal_dcfclk_for_uclk[i], NULL);
+ if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
+ optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz;
+ }
+ }
+
+ // Calculate optimal uclk for each dcfclk sta target
+ for (i = 0; i < num_dcfclk_sta_targets; i++) {
+ for (j = 0; j < num_uclk_states; j++) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
+ optimal_uclk_for_dcfclk_sta_targets[i] =
+ bw_params->clk_table.entries[j].memclk_mhz * 16;
+ break;
+ }
+ }
+ }
+
+ i = 0;
+ j = 0;
+ // create the final dcfclk and uclk table
+ while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
+ dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
+ } else {
+ if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= bw_params->clk_table.entries[1].dcfclk_mhz) {
+ dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
+ dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
+ } else {
+ j = num_uclk_states;
+ }
+ }
+ }
+
+ while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) {
+ dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
+ dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
+ }
+
+ while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
+ optimal_dcfclk_for_uclk[j] <= bw_params->clk_table.entries[1].dcfclk_mhz) {
+ dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
+ dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
+ }
+
+ dcn3_0_soc.num_states = num_states;
+ for (i = 0; i < dcn3_0_soc.num_states; i++) {
+ dcn3_0_soc.clock_limits[i].state = i;
+ dcn3_0_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
+ dcn3_0_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
+ dcn3_0_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
+
+ /* Fill all states with max values of all other clocks */
+ dcn3_0_soc.clock_limits[i].dispclk_mhz = bw_params->clk_table.entries[1].dispclk_mhz;
+ dcn3_0_soc.clock_limits[i].dppclk_mhz = bw_params->clk_table.entries[1].dppclk_mhz;
+ dcn3_0_soc.clock_limits[i].phyclk_mhz = bw_params->clk_table.entries[1].phyclk_mhz;
+ dcn3_0_soc.clock_limits[i].dtbclk_mhz = dcn3_0_soc.clock_limits[0].dtbclk_mhz;
+ /* These clocks cannot come from bw_params, always fill from dcn3_0_soc[1] */
+ /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
+ dcn3_0_soc.clock_limits[i].phyclk_d18_mhz = dcn3_0_soc.clock_limits[0].phyclk_d18_mhz;
+ dcn3_0_soc.clock_limits[i].socclk_mhz = dcn3_0_soc.clock_limits[0].socclk_mhz;
+ dcn3_0_soc.clock_limits[i].dscclk_mhz = dcn3_0_soc.clock_limits[0].dscclk_mhz;
+ }
+ /* re-init DML with updated bb */
+ dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
+ if (dc->current_state)
+ dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
+ }
+
+ /* re-init DML with updated bb */
+ dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
+ if (dc->current_state)
+ dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
+}
+
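Editor's note: the state-table construction above merges two ascending lists, the fixed DCFCLK STA targets (each paired with the lowest UCLK state whose optimal DCFCLK exceeds it) and the per-UCLK optimal DCFCLK values (each paired with its own DRAM speed, memclk_mhz * 16 MT/s). The stand-alone C sketch below mirrors that two-pointer merge with made-up example numbers; the array names and values are illustrative only, not the driver's.

/* Illustrative sketch (not driver code) of the dcfclk/uclk table merge above. */
#include <stdio.h>

#define MAX_STATES 8

int main(void)
{
	/* DCFCLK STA targets (MHz), already capped to the max DCFCLK */
	unsigned int sta[] = { 694, 875, 1000, 1200 };
	/* optimal DCFCLK (MHz) and DRAM speed (MT/s) per UCLK state */
	unsigned int opt_dcf[] = { 706, 902, 1324 };
	unsigned int dram_mts[] = { 1600, 8000, 16000 };
	unsigned int opt_uclk[4] = { 0 }; /* optimal DRAM speed per STA target */
	unsigned int out_dcf[MAX_STATES], out_dram[MAX_STATES];
	unsigned int num_sta = 4, num_uclk = 3, n = 0, i, j;

	/* pair each STA target with the first UCLK whose optimal DCFCLK exceeds it */
	for (i = 0; i < num_sta; i++)
		for (j = 0; j < num_uclk; j++)
			if (sta[i] < opt_dcf[j]) {
				opt_uclk[i] = dram_mts[j];
				break;
			}

	/* two-pointer merge of the two ascending lists into the final table */
	i = 0;
	j = 0;
	while (i < num_sta && j < num_uclk && n < MAX_STATES) {
		if (sta[i] < opt_dcf[j]) {
			out_dcf[n] = sta[i];
			out_dram[n++] = opt_uclk[i++];
		} else {
			out_dcf[n] = opt_dcf[j];
			out_dram[n++] = dram_mts[j++];
		}
	}
	while (i < num_sta && n < MAX_STATES) {
		out_dcf[n] = sta[i];
		out_dram[n++] = opt_uclk[i++];
	}
	while (j < num_uclk && n < MAX_STATES) {
		out_dcf[n] = opt_dcf[j];
		out_dram[n++] = dram_mts[j++];
	}

	for (i = 0; i < n; i++)
		printf("state %u: dcfclk %u MHz, dram %u MT/s\n",
		       i, out_dcf[i], out_dram[i]);
	return 0;
}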
+static struct resource_funcs dcn30_res_pool_funcs = {
+ .destroy = dcn30_destroy_resource_pool,
+ .link_enc_create = dcn30_link_encoder_create,
+ .panel_cntl_create = dcn30_panel_cntl_create,
+ .validate_bandwidth = dcn30_validate_bandwidth,
+ .populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
+ .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .add_stream_to_ctx = dcn30_add_stream_to_ctx,
+ .remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
+ .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
+ .set_mcif_arb_params = dcn30_set_mcif_arb_params,
+ .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
+ .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
+ .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
+ .update_bw_bounding_box = dcn30_update_bw_bounding_box,
+ .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+};
+
+static bool dcn30_resource_construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dcn30_resource_pool *pool)
+{
+ int i;
+ struct dc_context *ctx = dc->ctx;
+ struct irq_service_init_data init_data;
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap_dcn3;
+
+ pool->base.funcs = &dcn30_res_pool_funcs;
+
+ /*************************************************
+	 * Resource + asic cap hardcoding                *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+ pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
+ dc->caps.max_downscale_ratio = 600;
+ dc->caps.i2c_speed_in_khz = 100;
+ dc->caps.max_cursor_size = 256;
+ dc->caps.dmdata_alloc_size = 2048;
+
+ dc->caps.max_slave_planes = 1;
+ dc->caps.post_blend_color_processing = true;
+ dc->caps.force_dp_tps4_for_cp2520 = true;
+ dc->caps.extended_aux_timeout_support = true;
+ dc->caps.dmcub_support = true;
+
+ /* Color pipeline capabilities */
+ dc->caps.color.dpp.dcn_arch = 1;
+ dc->caps.color.dpp.input_lut_shared = 0;
+ dc->caps.color.dpp.icsc = 1;
+ dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr
+ dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+ dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.pq = 1;
+ dc->caps.color.dpp.dgam_rom_caps.hlg = 1;
+ dc->caps.color.dpp.post_csc = 1;
+ dc->caps.color.dpp.gamma_corr = 1;
+
+ dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.ogam_ram = 1;
+ // no OGAM ROM on DCN3
+ dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
+ dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+ dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.ocsc = 0;
+
+ dc->caps.color.mpc.gamut_remap = 1;
+ dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //3
+ dc->caps.color.mpc.ogam_ram = 1;
+ dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+ dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+ dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+ dc->caps.color.mpc.ocsc = 1;
+
+ if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
+ dc->debug = debug_defaults_drv;
+ else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
+ dc->debug = debug_defaults_diags;
+ } else
+ dc->debug = debug_defaults_diags;
+ // Init the vm_helper
+ if (dc->vm_helper)
+ vm_helper_init(dc->vm_helper, 16);
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ /* Clock Sources for Pixel Clock*/
+ pool->base.clock_sources[DCN30_CLK_SRC_PLL0] =
+ dcn30_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL0,
+ &clk_src_regs[0], false);
+ pool->base.clock_sources[DCN30_CLK_SRC_PLL1] =
+ dcn30_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL1,
+ &clk_src_regs[1], false);
+ pool->base.clock_sources[DCN30_CLK_SRC_PLL2] =
+ dcn30_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs[2], false);
+ pool->base.clock_sources[DCN30_CLK_SRC_PLL3] =
+ dcn30_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs[3], false);
+ pool->base.clock_sources[DCN30_CLK_SRC_PLL4] =
+ dcn30_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL4,
+ &clk_src_regs[4], false);
+ pool->base.clock_sources[DCN30_CLK_SRC_PLL5] =
+ dcn30_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL5,
+ &clk_src_regs[5], false);
+
+ pool->base.clk_src_count = DCN30_CLK_SRC_TOTAL;
+
+	/* todo: do not reuse phy_pll registers */
+ pool->base.dp_clock_source =
+ dcn30_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_ID_DP_DTO,
+ &clk_src_regs[0], true);
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+ }
+
+ /* DCCG */
+ pool->base.dccg = dccg30_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
+ if (pool->base.dccg == NULL) {
+ dm_error("DC: failed to create dccg!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ /* PP Lib and SMU interfaces */
+ init_soc_bounding_box(dc, pool);
+
+ dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
+
+ /* IRQ */
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dcn30_create(&init_data);
+ if (!pool->base.irqs)
+ goto create_fail;
+
+ /* HUBBUB */
+ pool->base.hubbub = dcn30_hubbub_create(ctx);
+ if (pool->base.hubbub == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create hubbub!\n");
+ goto create_fail;
+ }
+
+ /* HUBPs, DPPs, OPPs and TGs */
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.hubps[i] = dcn30_hubp_create(ctx, i);
+ if (pool->base.hubps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create hubps!\n");
+ goto create_fail;
+ }
+
+ pool->base.dpps[i] = dcn30_dpp_create(ctx, i);
+ if (pool->base.dpps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create dpps!\n");
+ goto create_fail;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_opp; i++) {
+ pool->base.opps[i] = dcn30_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ goto create_fail;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ pool->base.timing_generators[i] = dcn30_timing_generator_create(
+ ctx, i);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto create_fail;
+ }
+ }
+ pool->base.timing_generator_count = i;
+
+ /* ABM */
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ pool->base.multiple_abms[i] = dmub_abm_create(ctx,
+ &abm_regs[i],
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.multiple_abms[i] == NULL) {
+ dm_error("DC: failed to create abm for pipe %d!\n", i);
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+ }
+ /* MPC and DSC */
+ pool->base.mpc = dcn30_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut);
+ if (pool->base.mpc == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create mpc!\n");
+ goto create_fail;
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
+ pool->base.dscs[i] = dcn30_dsc_create(ctx, i);
+ if (pool->base.dscs[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create display stream compressor %d!\n", i);
+ goto create_fail;
+ }
+ }
+
+ /* DWB and MMHUBBUB */
+ if (!dcn30_dwbc_create(ctx, &pool->base)) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create dwbc!\n");
+ goto create_fail;
+ }
+
+ if (!dcn30_mmhubbub_create(ctx, &pool->base)) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create mcif_wb!\n");
+ goto create_fail;
+ }
+
+ /* AUX and I2C */
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ pool->base.engines[i] = dcn30_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto create_fail;
+ }
+ pool->base.hw_i2cs[i] = dcn30_i2c_hw_create(ctx, i);
+ if (pool->base.hw_i2cs[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create hw i2c!!\n");
+ goto create_fail;
+ }
+ pool->base.sw_i2cs[i] = NULL;
+ }
+
+ /* Audio, Stream Encoders including DIG and virtual, MPC 3D LUTs */
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
+ &res_create_funcs : &res_create_maximus_funcs)))
+ goto create_fail;
+
+ /* HW Sequencer and Plane caps */
+ dcn30_hw_sequencer_construct(dc);
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
+ dc->cap_funcs = cap_funcs;
+
+ return true;
+
+create_fail:
+
+ dcn30_resource_destruct(pool);
+
+ return false;
+}
+
+struct resource_pool *dcn30_create_resource_pool(
+ const struct dc_init_data *init_data,
+ struct dc *dc)
+{
+ struct dcn30_resource_pool *pool =
+ kzalloc(sizeof(struct dcn30_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (dcn30_resource_construct(init_data->num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ kfree(pool);
+ return NULL;
+}
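Editor's note: for reference, a minimal stand-alone sketch of the allocate/construct/free-on-failure pattern used by dcn30_create_resource_pool() above. The types and function names here are simplified stand-ins, with calloc()/free() standing in for kzalloc()/kfree().

/* Illustrative sketch (not driver code) of the create/construct pattern. */
#include <stdbool.h>
#include <stdlib.h>

struct pool { int num_pipes; };

/* stand-in for dcn30_resource_construct(): may fail after partial setup */
static bool pool_construct(struct pool *p)
{
	p->num_pipes = 6;
	return true;
}

/* stand-in for dcn30_create_resource_pool() */
struct pool *pool_create(void)
{
	struct pool *p = calloc(1, sizeof(*p)); /* kzalloc(..., GFP_KERNEL) analogue */

	if (!p)
		return NULL;

	if (pool_construct(p))
		return p;

	free(p); /* kfree() analogue: constructor failed, release the shell */
	return NULL;
}

int main(void)
{
	struct pool *p = pool_create();
	int ok = p != NULL;

	free(p); /* the driver would call a matching destroy hook instead */
	return ok ? 0 : 1;
}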
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
new file mode 100644
index 000000000000..4b4a4d81c1e3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DCN30_RESOURCE_H_
+#define _DCN30_RESOURCE_H_
+
+#include "core_types.h"
+
+#define TO_DCN30_RES_POOL(pool)\
+ container_of(pool, struct dcn30_resource_pool, base)
+
+struct dc;
+struct resource_pool;
+struct _vcs_dpi_display_pipe_params_st;
+
+struct dcn30_resource_pool {
+ struct resource_pool base;
+};
+struct resource_pool *dcn30_create_resource_pool(
+ const struct dc_init_data *init_data,
+ struct dc *dc);
+
+void dcn30_set_mcif_arb_params(
+ struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt);
+
+unsigned int dcn30_calc_max_scaled_time(
+ unsigned int time_per_pixel,
+ enum mmhubbub_wbif_mode mode,
+ unsigned int urgent_watermark);
+
+bool dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context,
+ bool fast_validate);
+void dcn30_populate_dml_writeback_from_context(
+ struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
+
+int dcn30_populate_dml_pipes_from_context(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes);
+
+bool dcn30_acquire_post_bldn_3dlut(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ int mpcc_id,
+ struct dc_3dlut **lut,
+ struct dc_transfer_func **shaper);
+
+bool dcn30_release_post_bldn_3dlut(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct dc_3dlut **lut,
+ struct dc_transfer_func **shaper);
+
+enum dc_status dcn30_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *dc_stream);
+#endif /* _DCN30_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c
new file mode 100644
index 000000000000..9c0020c8a730
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dc_bios_types.h"
+#include "dcn30_vpg.h"
+#include "reg_helper.h"
+
+#define DC_LOGGER \
+ vpg3->base.ctx->logger
+
+#define REG(reg)\
+ (vpg3->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ vpg3->vpg_shift->field_name, vpg3->vpg_mask->field_name
+
+
+#define CTX \
+ vpg3->base.ctx
+
+
+static void vpg3_update_generic_info_packet(
+ struct vpg *vpg,
+ uint32_t packet_index,
+ const struct dc_info_packet *info_packet)
+{
+ struct dcn30_vpg *vpg3 = DCN30_VPG_FROM_VPG(vpg);
+ uint32_t i;
+
+ /* TODOFPGA Figure out a proper number for max_retries polling for lock
+ * use 50 for now.
+ */
+ uint32_t max_retries = 50;
+
+ if (packet_index > 14)
+ ASSERT(0);
+
+	/* poll until dig_update_lock is not locked -> asic internal signal;
+	 * assume the otg master lock will unlock it
+	 */
+ /* REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS,
+ * 0, 10, max_retries);
+ */
+
+ /* TODO: Check if this is required */
+ /* check if HW reading GSP memory */
+ REG_WAIT(VPG_GENERIC_STATUS, VPG_GENERIC_CONFLICT_OCCURED,
+ 0, 10, max_retries);
+
+	/* If HW is still reading GSP memory after waiting too long ->
+	 * something is wrong. Clear the GSP memory access conflict and notify?
+	 * SW is about to write to GSP memory.
+	 */
+ REG_UPDATE(VPG_GENERIC_STATUS, VPG_GENERIC_CONFLICT_CLR, 1);
+
+ /* choose which generic packet to use */
+ REG_UPDATE(VPG_GENERIC_PACKET_ACCESS_CTRL,
+ VPG_GENERIC_DATA_INDEX, packet_index*9);
+
+ /* write generic packet header
+ * (4th byte is for GENERIC0 only)
+ */
+ REG_SET_4(VPG_GENERIC_PACKET_DATA, 0,
+ VPG_GENERIC_DATA_BYTE0, info_packet->hb0,
+ VPG_GENERIC_DATA_BYTE1, info_packet->hb1,
+ VPG_GENERIC_DATA_BYTE2, info_packet->hb2,
+ VPG_GENERIC_DATA_BYTE3, info_packet->hb3);
+
+ /* write generic packet contents
+ * (we never use last 4 bytes)
+ * there are 8 (0-7) mmDIG0_AFMT_GENERIC0_x registers
+ */
+ {
+ const uint32_t *content =
+ (const uint32_t *) &info_packet->sb[0];
+
+ for (i = 0; i < 8; i++) {
+ REG_WRITE(VPG_GENERIC_PACKET_DATA, *content++);
+ }
+ }
+
+	/* atomically update the double-buffered GENERIC registers in frame mode
+	 * (update at next block_update when block_update_lock == 0).
+	 */
+ switch (packet_index) {
+ case 0:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC0_FRAME_UPDATE, 1);
+ break;
+ case 1:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC1_FRAME_UPDATE, 1);
+ break;
+ case 2:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC2_FRAME_UPDATE, 1);
+ break;
+ case 3:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC3_FRAME_UPDATE, 1);
+ break;
+ case 4:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC4_FRAME_UPDATE, 1);
+ break;
+ case 5:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC5_FRAME_UPDATE, 1);
+ break;
+ case 6:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC6_FRAME_UPDATE, 1);
+ break;
+ case 7:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC7_FRAME_UPDATE, 1);
+ break;
+ case 8:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC8_FRAME_UPDATE, 1);
+ break;
+ case 9:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC9_FRAME_UPDATE, 1);
+ break;
+ case 10:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC10_FRAME_UPDATE, 1);
+ break;
+ case 11:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC11_FRAME_UPDATE, 1);
+ break;
+ case 12:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC12_FRAME_UPDATE, 1);
+ break;
+ case 13:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC13_FRAME_UPDATE, 1);
+ break;
+ case 14:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC14_FRAME_UPDATE, 1);
+ break;
+ default:
+ break;
+ }
+}
+
+static struct vpg_funcs dcn30_vpg_funcs = {
+ .update_generic_info_packet = vpg3_update_generic_info_packet,
+};
+
+void vpg3_construct(struct dcn30_vpg *vpg3,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn30_vpg_registers *vpg_regs,
+ const struct dcn30_vpg_shift *vpg_shift,
+ const struct dcn30_vpg_mask *vpg_mask)
+{
+ vpg3->base.ctx = ctx;
+
+ vpg3->base.inst = inst;
+ vpg3->base.funcs = &dcn30_vpg_funcs;
+
+ vpg3->regs = vpg_regs;
+ vpg3->vpg_shift = vpg_shift;
+ vpg3->vpg_mask = vpg_mask;
+}
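Editor's note: a side note on the GSP addressing used by vpg3_update_generic_info_packet() above. Each generic info packet occupies nine dwords of GSP memory (one header dword built from hb0..hb3, then eight payload dwords from sb[]), which is why VPG_GENERIC_DATA_INDEX is programmed to packet_index * 9. A tiny illustrative sketch (not driver code) of that indexing:

#include <stdio.h>

#define GSP_DWORDS_PER_PACKET 9 /* 1 header dword + 8 payload dwords */

static unsigned int gsp_data_index(unsigned int packet_index)
{
	return packet_index * GSP_DWORDS_PER_PACKET;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i <= 14; i++)
		printf("packet %2u -> VPG_GENERIC_DATA_INDEX %3u\n",
		       i, gsp_data_index(i));
	return 0;
}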
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h
new file mode 100644
index 000000000000..0284092630f1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCN30_VPG_H__
+#define __DAL_DCN30_VPG_H__
+
+
+#define DCN30_VPG_FROM_VPG(vpg)\
+ container_of(vpg, struct dcn30_vpg, base)
+
+#define VPG_DCN3_REG_LIST(id) \
+ SRI(VPG_GENERIC_STATUS, VPG, id), \
+ SRI(VPG_GENERIC_PACKET_ACCESS_CTRL, VPG, id), \
+ SRI(VPG_GENERIC_PACKET_DATA, VPG, id), \
+ SRI(VPG_GSP_FRAME_UPDATE_CTRL, VPG, id)
+
+struct dcn30_vpg_registers {
+ uint32_t VPG_GENERIC_STATUS;
+ uint32_t VPG_GENERIC_PACKET_ACCESS_CTRL;
+ uint32_t VPG_GENERIC_PACKET_DATA;
+ uint32_t VPG_GSP_FRAME_UPDATE_CTRL;
+};
+
+#define DCN3_VPG_MASK_SH_LIST(mask_sh)\
+ SE_SF(VPG0_VPG_GENERIC_STATUS, VPG_GENERIC_CONFLICT_OCCURED, mask_sh),\
+ SE_SF(VPG0_VPG_GENERIC_STATUS, VPG_GENERIC_CONFLICT_CLR, mask_sh),\
+ SE_SF(VPG0_VPG_GENERIC_PACKET_ACCESS_CTRL, VPG_GENERIC_DATA_INDEX, mask_sh),\
+ SE_SF(VPG0_VPG_GENERIC_PACKET_DATA, VPG_GENERIC_DATA_BYTE0, mask_sh),\
+ SE_SF(VPG0_VPG_GENERIC_PACKET_DATA, VPG_GENERIC_DATA_BYTE1, mask_sh),\
+ SE_SF(VPG0_VPG_GENERIC_PACKET_DATA, VPG_GENERIC_DATA_BYTE2, mask_sh),\
+ SE_SF(VPG0_VPG_GENERIC_PACKET_DATA, VPG_GENERIC_DATA_BYTE3, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC0_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC1_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC2_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC3_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC4_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC5_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC6_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC7_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC8_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC9_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC10_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC11_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC12_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC13_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC14_FRAME_UPDATE, mask_sh)
+
+#define VPG_DCN3_REG_FIELD_LIST(type) \
+ type VPG_GENERIC_CONFLICT_OCCURED;\
+ type VPG_GENERIC_CONFLICT_CLR;\
+ type VPG_GENERIC_DATA_INDEX;\
+ type VPG_GENERIC_DATA_BYTE0;\
+ type VPG_GENERIC_DATA_BYTE1;\
+ type VPG_GENERIC_DATA_BYTE2;\
+ type VPG_GENERIC_DATA_BYTE3;\
+ type VPG_GENERIC0_FRAME_UPDATE;\
+ type VPG_GENERIC1_FRAME_UPDATE;\
+ type VPG_GENERIC2_FRAME_UPDATE;\
+ type VPG_GENERIC3_FRAME_UPDATE;\
+ type VPG_GENERIC4_FRAME_UPDATE;\
+ type VPG_GENERIC5_FRAME_UPDATE;\
+ type VPG_GENERIC6_FRAME_UPDATE;\
+ type VPG_GENERIC7_FRAME_UPDATE;\
+ type VPG_GENERIC8_FRAME_UPDATE;\
+ type VPG_GENERIC9_FRAME_UPDATE;\
+ type VPG_GENERIC10_FRAME_UPDATE;\
+ type VPG_GENERIC11_FRAME_UPDATE;\
+ type VPG_GENERIC12_FRAME_UPDATE;\
+ type VPG_GENERIC13_FRAME_UPDATE;\
+ type VPG_GENERIC14_FRAME_UPDATE
+
+
+struct dcn30_vpg_shift {
+ VPG_DCN3_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn30_vpg_mask {
+ VPG_DCN3_REG_FIELD_LIST(uint32_t);
+};
+
+struct vpg;
+
+struct vpg_funcs {
+ void (*update_generic_info_packet)(
+ struct vpg *vpg,
+ uint32_t packet_index,
+ const struct dc_info_packet *info_packet);
+};
+
+struct vpg {
+ const struct vpg_funcs *funcs;
+ struct dc_context *ctx;
+ int inst;
+};
+
+struct dcn30_vpg {
+ struct vpg base;
+ const struct dcn30_vpg_registers *regs;
+ const struct dcn30_vpg_shift *vpg_shift;
+ const struct dcn30_vpg_mask *vpg_mask;
+};
+
+void vpg3_construct(struct dcn30_vpg *vpg3,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn30_vpg_registers *vpg_regs,
+ const struct dcn30_vpg_shift *vpg_shift,
+ const struct dcn30_vpg_mask *vpg_mask);
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
index 968c46dfb506..5da7677627a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
@@ -38,6 +38,7 @@ struct cp_psp_stream_config {
};
struct cp_psp_funcs {
+ bool (*enable_assr)(void *handle, struct dc_link *link);
void (*update_stream_config)(void *handle, struct cp_psp_stream_config *config);
};
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 8bde1d688f2e..b2cc2bf95712 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -35,6 +35,29 @@
struct dp_mst_stream_allocation_table;
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+/*
+ * Allocate memory accessible by the GPU
+ *
+ * Frame buffer allocations must be aligned to a 4096-byte boundary
+ *
+ * Returns the virtual address and sets *addr to the physical address
+ */
+void *dm_helpers_allocate_gpu_mem(
+ struct dc_context *ctx,
+ enum dc_gpu_mem_alloc_type type,
+ size_t size,
+ long long *addr);
+
+/*
+ * Free the GPU-accessible memory at the virtual address pvMem
+ */
+void dm_helpers_free_gpu_mem(
+ struct dc_context *ctx,
+ enum dc_gpu_mem_alloc_type type,
+ void *pvMem);
+
+#endif
enum dc_edid_status dm_helpers_parse_edid_caps(
struct dc_context *ctx,
const struct dc_edid *edid,
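Editor's note: a hedged usage sketch of the GPU memory helpers declared in the hunk above, compiled stand-alone with stub definitions. The real functions allocate GPU-accessible memory through the DM layer, so the stub bodies, the struct contents, and the DC_MEM_ALLOC_TYPE_GART_STUB enum value here are stand-ins intended only to show the allocate -> use -> free call pattern.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct dc_context { int unused; };
enum dc_gpu_mem_alloc_type { DC_MEM_ALLOC_TYPE_GART_STUB };

/* stub: the real helper allocates GPU-accessible memory through the DM layer */
static void *dm_helpers_allocate_gpu_mem(struct dc_context *ctx,
					 enum dc_gpu_mem_alloc_type type,
					 size_t size, long long *addr)
{
	void *cpu_va = aligned_alloc(4096, size); /* honours the 4096-byte alignment rule */

	*addr = (long long)(uintptr_t)cpu_va; /* stand-in for the GPU/physical address */
	return cpu_va;
}

/* stub: the real helper returns the memory to the GPU memory manager */
static void dm_helpers_free_gpu_mem(struct dc_context *ctx,
				    enum dc_gpu_mem_alloc_type type, void *pvMem)
{
	free(pvMem);
}

int main(void)
{
	struct dc_context ctx = { 0 };
	long long gpu_addr = 0;
	void *va = dm_helpers_allocate_gpu_mem(&ctx, DC_MEM_ALLOC_TYPE_GART_STUB,
					       4096, &gpu_addr);

	if (!va)
		return 1;
	printf("cpu va %p, gpu addr 0x%llx\n", va, gpu_addr);
	dm_helpers_free_gpu_mem(&ctx, DC_MEM_ALLOC_TYPE_GART_STUB, va);
	return 0;
}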
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 968ff1fef486..fdd1943c828d 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -261,75 +261,6 @@ struct persistent_data_flag {
bool save_per_edid;
};
-/* Call to write data in registry editor for persistent data storage.
- *
- * \inputs sink - identify edid/link for registry folder creation
- * module name - identify folders for registry
- * key name - identify keys within folders for registry
- * params - value to write in defined folder/key
- * size - size of the input params
- * flag - determine whether to save by link or edid
- *
- * \returns true - call is successful
- * false - call failed
- *
- * sink module key
- * -----------------------------------------------------------------------------
- * NULL NULL NULL - failure
- * NULL NULL - - create key with param value
- * under base folder
- * NULL - NULL - create module folder under base folder
- * - NULL NULL - failure
- * NULL - - - create key under module folder
- * with no edid/link identification
- * - NULL - - create key with param value
- * under base folder
- * - - NULL - create module folder under base folder
- * - - - - create key under module folder
- * with edid/link identification
- */
-bool dm_write_persistent_data(struct dc_context *ctx,
- const struct dc_sink *sink,
- const char *module_name,
- const char *key_name,
- void *params,
- unsigned int size,
- struct persistent_data_flag *flag);
-
-
-/* Call to read data in registry editor for persistent data storage.
- *
- * \inputs sink - identify edid/link for registry folder creation
- * module name - identify folders for registry
- * key name - identify keys within folders for registry
- * size - size of the output params
- * flag - determine whether it was save by link or edid
- *
- * \returns params - value read from defined folder/key
- * true - call is successful
- * false - call failed
- *
- * sink module key
- * -----------------------------------------------------------------------------
- * NULL NULL NULL - failure
- * NULL NULL - - read key under base folder
- * NULL - NULL - failure
- * - NULL NULL - failure
- * NULL - - - read key under module folder
- * with no edid/link identification
- * - NULL - - read key under base folder
- * - - NULL - failure
- * - - - - read key under module folder
- * with edid/link identification
- */
-bool dm_read_persistent_data(struct dc_context *ctx,
- const struct dc_sink *sink,
- const char *module_name,
- const char *key_name,
- void *params,
- unsigned int size,
- struct persistent_data_flag *flag);
-
bool dm_query_extended_brightness_caps
(struct dc_context *ctx, enum dm_acpi_display_type display,
struct dm_acpi_atif_backlight_caps *pCaps);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index e34c3376efc1..417331438c30 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -61,6 +61,10 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags)
endif
+ifdef CONFIG_DRM_AMD_DC_DCN3_0
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) -Wframe-larger-than=2048
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
+endif
CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags)
@@ -72,6 +76,9 @@ DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
endif
+ifdef CONFIG_DRM_AMD_DC_DCN3_0
+DML += dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
+endif
AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML))
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index ca807846032f..72423dc425dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -890,11 +890,6 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
double refcyc_per_req_delivery_c;
unsigned int full_recout_width;
- double xfc_transfer_delay;
- double xfc_precharge_delay;
- double xfc_remote_surface_flip_latency;
- double xfc_dst_y_delta_drq_limit;
- double xfc_prefetch_margin;
double refcyc_per_req_delivery_pre_cur0;
double refcyc_per_req_delivery_cur0;
double refcyc_per_req_delivery_pre_cur1;
@@ -1344,22 +1339,6 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
ASSERT(refcyc_per_req_delivery_c < dml_pow(2, 13));
}
- // XFC
- xfc_transfer_delay = get_xfc_transfer_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
- xfc_precharge_delay = get_xfc_precharge_delay(mode_lib,
- e2e_pipe_param,
- num_pipes,
- pipe_idx);
- xfc_remote_surface_flip_latency = get_xfc_remote_surface_flip_latency(mode_lib,
- e2e_pipe_param,
- num_pipes,
- pipe_idx);
- xfc_dst_y_delta_drq_limit = xfc_remote_surface_flip_latency;
- xfc_prefetch_margin = get_xfc_prefetch_margin(mode_lib,
- e2e_pipe_param,
- num_pipes,
- pipe_idx);
-
// TTU - Cursor
refcyc_per_req_delivery_pre_cur0 = 0.0;
refcyc_per_req_delivery_cur0 = 0.0;
@@ -1510,17 +1489,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
disp_dlg_regs->chunk_hdl_adjust_cur1 = 3;
disp_dlg_regs->dst_y_offset_cur1 = 0;
- disp_dlg_regs->xfc_reg_transfer_delay = xfc_transfer_delay;
- disp_dlg_regs->xfc_reg_precharge_delay = xfc_precharge_delay;
- disp_dlg_regs->xfc_reg_remote_surface_flip_latency = xfc_remote_surface_flip_latency;
- disp_dlg_regs->xfc_reg_prefetch_margin = dml_ceil(xfc_prefetch_margin * refclk_freq_in_mhz,
- 1);
-
- // slave has to have this value also set to off
- if (src->xfc_enable && !src->xfc_slave)
- disp_dlg_regs->dst_y_delta_drq_limit = dml_ceil(xfc_dst_y_delta_drq_limit, 1);
- else
- disp_dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off
+ disp_dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off
disp_ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int) (refcyc_per_req_delivery_pre_l
* dml_pow(2, 10));
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index 287b7a0ad108..9c78446c3a9d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -890,11 +890,6 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
double refcyc_per_req_delivery_c;
unsigned int full_recout_width;
- double xfc_transfer_delay;
- double xfc_precharge_delay;
- double xfc_remote_surface_flip_latency;
- double xfc_dst_y_delta_drq_limit;
- double xfc_prefetch_margin;
double refcyc_per_req_delivery_pre_cur0;
double refcyc_per_req_delivery_cur0;
double refcyc_per_req_delivery_pre_cur1;
@@ -1345,22 +1340,6 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
ASSERT(refcyc_per_req_delivery_c < dml_pow(2, 13));
}
- // XFC
- xfc_transfer_delay = get_xfc_transfer_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
- xfc_precharge_delay = get_xfc_precharge_delay(mode_lib,
- e2e_pipe_param,
- num_pipes,
- pipe_idx);
- xfc_remote_surface_flip_latency = get_xfc_remote_surface_flip_latency(mode_lib,
- e2e_pipe_param,
- num_pipes,
- pipe_idx);
- xfc_dst_y_delta_drq_limit = xfc_remote_surface_flip_latency;
- xfc_prefetch_margin = get_xfc_prefetch_margin(mode_lib,
- e2e_pipe_param,
- num_pipes,
- pipe_idx);
-
// TTU - Cursor
refcyc_per_req_delivery_pre_cur0 = 0.0;
refcyc_per_req_delivery_cur0 = 0.0;
@@ -1511,17 +1490,7 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
disp_dlg_regs->chunk_hdl_adjust_cur1 = 3;
disp_dlg_regs->dst_y_offset_cur1 = 0;
- disp_dlg_regs->xfc_reg_transfer_delay = xfc_transfer_delay;
- disp_dlg_regs->xfc_reg_precharge_delay = xfc_precharge_delay;
- disp_dlg_regs->xfc_reg_remote_surface_flip_latency = xfc_remote_surface_flip_latency;
- disp_dlg_regs->xfc_reg_prefetch_margin = dml_ceil(xfc_prefetch_margin * refclk_freq_in_mhz,
- 1);
-
- // slave has to have this value also set to off
- if (src->xfc_enable && !src->xfc_slave)
- disp_dlg_regs->dst_y_delta_drq_limit = dml_ceil(xfc_dst_y_delta_drq_limit, 1);
- else
- disp_dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off
+ disp_dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off
disp_ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int) (refcyc_per_req_delivery_pre_l
* dml_pow(2, 10));
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index 90a5fefef05b..edd41d358291 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -936,11 +936,6 @@ static void dml_rq_dlg_get_dlg_params(
double refcyc_per_req_delivery_c;
unsigned int full_recout_width;
- double xfc_transfer_delay;
- double xfc_precharge_delay;
- double xfc_remote_surface_flip_latency;
- double xfc_dst_y_delta_drq_limit;
- double xfc_prefetch_margin;
double refcyc_per_req_delivery_pre_cur0;
double refcyc_per_req_delivery_cur0;
double refcyc_per_req_delivery_pre_cur1;
@@ -1412,25 +1407,6 @@ static void dml_rq_dlg_get_dlg_params(
ASSERT(refcyc_per_req_delivery_c < dml_pow(2, 13));
}
- // XFC
- xfc_transfer_delay = get_xfc_transfer_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
- xfc_precharge_delay = get_xfc_precharge_delay(
- mode_lib,
- e2e_pipe_param,
- num_pipes,
- pipe_idx);
- xfc_remote_surface_flip_latency = get_xfc_remote_surface_flip_latency(
- mode_lib,
- e2e_pipe_param,
- num_pipes,
- pipe_idx);
- xfc_dst_y_delta_drq_limit = xfc_remote_surface_flip_latency;
- xfc_prefetch_margin = get_xfc_prefetch_margin(
- mode_lib,
- e2e_pipe_param,
- num_pipes,
- pipe_idx);
-
// TTU - Cursor
refcyc_per_req_delivery_pre_cur0 = 0.0;
refcyc_per_req_delivery_cur0 = 0.0;
@@ -1621,17 +1597,7 @@ static void dml_rq_dlg_get_dlg_params(
disp_dlg_regs->chunk_hdl_adjust_cur1 = 3;
disp_dlg_regs->dst_y_offset_cur1 = 0;
- disp_dlg_regs->xfc_reg_transfer_delay = xfc_transfer_delay;
- disp_dlg_regs->xfc_reg_precharge_delay = xfc_precharge_delay;
- disp_dlg_regs->xfc_reg_remote_surface_flip_latency = xfc_remote_surface_flip_latency;
- disp_dlg_regs->xfc_reg_prefetch_margin = dml_ceil(
- xfc_prefetch_margin * refclk_freq_in_mhz, 1);
-
- // slave has to have this value also set to off
- if (src->xfc_enable && !src->xfc_slave)
- disp_dlg_regs->dst_y_delta_drq_limit = dml_ceil(xfc_dst_y_delta_drq_limit, 1);
- else
- disp_dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off
+ disp_dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off
disp_ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int) (refcyc_per_req_delivery_pre_l
* dml_pow(2, 10));
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
new file mode 100644
index 000000000000..75dc4fe41731
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -0,0 +1,6865 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+#include "dc.h"
+#include "dc_link.h"
+#include "../display_mode_lib.h"
+#include "display_mode_vba_30.h"
+#include "../dml_inline_defs.h"
+
+
+/*
+ * NOTE:
+ * This file is gcc-parsable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it the code should
+ * remain as-is as it provides us with a guarantee from HW that it is correct.
+ */
+
+
+typedef struct {
+ double DPPCLK;
+ double DISPCLK;
+ double PixelClock;
+ double DCFCLKDeepSleep;
+ unsigned int DPPPerPlane;
+ bool ScalerEnabled;
+ enum scan_direction_class SourceScan;
+ unsigned int BlockWidth256BytesY;
+ unsigned int BlockHeight256BytesY;
+ unsigned int BlockWidth256BytesC;
+ unsigned int BlockHeight256BytesC;
+ unsigned int InterlaceEnable;
+ unsigned int NumberOfCursors;
+ unsigned int VBlank;
+ unsigned int HTotal;
+ unsigned int DCCEnable;
+ bool ODMCombineEnabled;
+} Pipe;
+
+#define BPP_INVALID 0
+#define BPP_BLENDED_PIPE 0xffffffff
+
+static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
+static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
+ struct display_mode_lib *mode_lib);
+static unsigned int dscceComputeDelay(
+ unsigned int bpc,
+ double BPP,
+ unsigned int sliceWidth,
+ unsigned int numSlices,
+ enum output_format_class pixelFormat,
+ enum output_encoder_class Output);
+static unsigned int dscComputeDelay(
+ enum output_format_class pixelFormat,
+ enum output_encoder_class Output);
+// Super monster function with some 45 arguments
+static bool CalculatePrefetchSchedule(
+ struct display_mode_lib *mode_lib,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ Pipe *myPipe,
+ unsigned int DSCDelay,
+ double DPPCLKDelaySubtotalPlusCNVCFormater,
+ double DPPCLKDelaySCL,
+ double DPPCLKDelaySCLLBOnly,
+ double DPPCLKDelayCNVCCursor,
+ double DISPCLKDelaySubtotal,
+ unsigned int DPP_RECOUT_WIDTH,
+ enum output_format_class OutputFormat,
+ unsigned int MaxInterDCNTileRepeaters,
+ unsigned int VStartup,
+ unsigned int MaxVStartup,
+ unsigned int GPUVMPageTableLevels,
+ bool GPUVMEnable,
+ bool HostVMEnable,
+ unsigned int HostVMMaxNonCachedPageTableLevels,
+ double HostVMMinPageSize,
+ bool DynamicMetadataEnable,
+ bool DynamicMetadataVMEnabled,
+ int DynamicMetadataLinesBeforeActiveRequired,
+ unsigned int DynamicMetadataTransmittedBytes,
+ double UrgentLatency,
+ double UrgentExtraLatency,
+ double TCalc,
+ unsigned int PDEAndMetaPTEBytesFrame,
+ unsigned int MetaRowByte,
+ unsigned int PixelPTEBytesPerRow,
+ double PrefetchSourceLinesY,
+ unsigned int SwathWidthY,
+ int BytePerPixelY,
+ double VInitPreFillY,
+ unsigned int MaxNumSwathY,
+ double PrefetchSourceLinesC,
+ unsigned int SwathWidthC,
+ int BytePerPixelC,
+ double VInitPreFillC,
+ unsigned int MaxNumSwathC,
+ long swath_width_luma_ub,
+ long swath_width_chroma_ub,
+ unsigned int SwathHeightY,
+ unsigned int SwathHeightC,
+ double TWait,
+ bool ProgressiveToInterlaceUnitInOPP,
+ double *DSTXAfterScaler,
+ double *DSTYAfterScaler,
+ double *DestinationLinesForPrefetch,
+ double *PrefetchBandwidth,
+ double *DestinationLinesToRequestVMInVBlank,
+ double *DestinationLinesToRequestRowInVBlank,
+ double *VRatioPrefetchY,
+ double *VRatioPrefetchC,
+ double *RequiredPrefetchPixDataBWLuma,
+ double *RequiredPrefetchPixDataBWChroma,
+ bool *NotEnoughTimeForDynamicMetadata,
+ double *Tno_bw,
+ double *prefetch_vmrow_bw,
+ double *Tdmdl_vm,
+ double *Tdmdl,
+ unsigned int *VUpdateOffsetPix,
+ double *VUpdateWidthPix,
+ double *VReadyOffsetPix);
+static double RoundToDFSGranularityUp(double Clock, double VCOSpeed);
+static double RoundToDFSGranularityDown(double Clock, double VCOSpeed);
+static void CalculateDCCConfiguration(
+ bool DCCEnabled,
+ bool DCCProgrammingAssumesScanDirectionUnknown,
+ enum source_format_class SourcePixelFormat,
+ unsigned int ViewportWidthLuma,
+ unsigned int ViewportWidthChroma,
+ unsigned int ViewportHeightLuma,
+ unsigned int ViewportHeightChroma,
+ double DETBufferSize,
+ unsigned int RequestHeight256ByteLuma,
+ unsigned int RequestHeight256ByteChroma,
+ enum dm_swizzle_mode TilingFormat,
+ unsigned int BytePerPixelY,
+ unsigned int BytePerPixelC,
+ double BytePerPixelDETY,
+ double BytePerPixelDETC,
+ enum scan_direction_class ScanOrientation,
+ unsigned int *MaxUncompressedBlockLuma,
+ unsigned int *MaxUncompressedBlockChroma,
+ unsigned int *MaxCompressedBlockLuma,
+ unsigned int *MaxCompressedBlockChroma,
+ unsigned int *IndependentBlockLuma,
+ unsigned int *IndependentBlockChroma);
+static double CalculatePrefetchSourceLines(
+ struct display_mode_lib *mode_lib,
+ double VRatio,
+ double vtaps,
+ bool Interlace,
+ bool ProgressiveToInterlaceUnitInOPP,
+ unsigned int SwathHeight,
+ unsigned int ViewportYStart,
+ double *VInitPreFill,
+ unsigned int *MaxNumSwath);
+static unsigned int CalculateVMAndRowBytes(
+ struct display_mode_lib *mode_lib,
+ bool DCCEnable,
+ unsigned int BlockHeight256Bytes,
+ unsigned int BlockWidth256Bytes,
+ enum source_format_class SourcePixelFormat,
+ unsigned int SurfaceTiling,
+ unsigned int BytePerPixel,
+ enum scan_direction_class ScanDirection,
+ unsigned int SwathWidth,
+ unsigned int ViewportHeight,
+ bool GPUVMEnable,
+ bool HostVMEnable,
+ unsigned int HostVMMaxNonCachedPageTableLevels,
+ unsigned int GPUVMMinPageSize,
+ unsigned int HostVMMinPageSize,
+ unsigned int PTEBufferSizeInRequests,
+ unsigned int Pitch,
+ unsigned int DCCMetaPitch,
+ unsigned int *MacroTileWidth,
+ unsigned int *MetaRowByte,
+ unsigned int *PixelPTEBytesPerRow,
+ bool *PTEBufferSizeNotExceeded,
+ unsigned int *dpte_row_width_ub,
+ unsigned int *dpte_row_height,
+ unsigned int *MetaRequestWidth,
+ unsigned int *MetaRequestHeight,
+ unsigned int *meta_row_width,
+ unsigned int *meta_row_height,
+ unsigned int *vm_group_bytes,
+ unsigned int *dpte_group_bytes,
+ unsigned int *PixelPTEReqWidth,
+ unsigned int *PixelPTEReqHeight,
+ unsigned int *PTERequestSize,
+ unsigned int *DPDE0BytesFrame,
+ unsigned int *MetaPTEBytesFrame);
+static double CalculateTWait(
+ unsigned int PrefetchMode,
+ double DRAMClockChangeLatency,
+ double UrgentLatency,
+ double SREnterPlusExitTime);
+static void CalculateRowBandwidth(
+ bool GPUVMEnable,
+ enum source_format_class SourcePixelFormat,
+ double VRatio,
+ double VRatioChroma,
+ bool DCCEnable,
+ double LineTime,
+ unsigned int MetaRowByteLuma,
+ unsigned int MetaRowByteChroma,
+ unsigned int meta_row_height_luma,
+ unsigned int meta_row_height_chroma,
+ unsigned int PixelPTEBytesPerRowLuma,
+ unsigned int PixelPTEBytesPerRowChroma,
+ unsigned int dpte_row_height_luma,
+ unsigned int dpte_row_height_chroma,
+ double *meta_row_bw,
+ double *dpte_row_bw);
+static void CalculateFlipSchedule(
+ struct display_mode_lib *mode_lib,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ double UrgentExtraLatency,
+ double UrgentLatency,
+ unsigned int GPUVMMaxPageTableLevels,
+ bool HostVMEnable,
+ unsigned int HostVMMaxNonCachedPageTableLevels,
+ bool GPUVMEnable,
+ double HostVMMinPageSize,
+ double PDEAndMetaPTEBytesPerFrame,
+ double MetaRowBytes,
+ double DPTEBytesPerRow,
+ double BandwidthAvailableForImmediateFlip,
+ unsigned int TotImmediateFlipBytes,
+ enum source_format_class SourcePixelFormat,
+ double LineTime,
+ double VRatio,
+ double VRatioChroma,
+ double Tno_bw,
+ bool DCCEnable,
+ unsigned int dpte_row_height,
+ unsigned int meta_row_height,
+ unsigned int dpte_row_height_chroma,
+ unsigned int meta_row_height_chroma,
+ double *DestinationLinesToRequestVMInImmediateFlip,
+ double *DestinationLinesToRequestRowInImmediateFlip,
+ double *final_flip_bw,
+ bool *ImmediateFlipSupportedForPipe);
+static double CalculateWriteBackDelay(
+ enum source_format_class WritebackPixelFormat,
+ double WritebackHRatio,
+ double WritebackVRatio,
+ unsigned int WritebackVTaps,
+ long WritebackDestinationWidth,
+ long WritebackDestinationHeight,
+ long WritebackSourceHeight,
+ unsigned int HTotal);
+static void CalculateDynamicMetadataParameters(
+ int MaxInterDCNTileRepeaters,
+ double DPPCLK,
+ double DISPCLK,
+ double DCFClkDeepSleep,
+ double PixelClock,
+ long HTotal,
+ long VBlank,
+ long DynamicMetadataTransmittedBytes,
+ long DynamicMetadataLinesBeforeActiveRequired,
+ int InterlaceEnable,
+ bool ProgressiveToInterlaceUnitInOPP,
+ double *Tsetup,
+ double *Tdmbf,
+ double *Tdmec,
+ double *Tdmsks);
+static void CalculateWatermarksAndDRAMSpeedChangeSupport(
+ struct display_mode_lib *mode_lib,
+ unsigned int PrefetchMode,
+ unsigned int NumberOfActivePlanes,
+ unsigned int MaxLineBufferLines,
+ unsigned int LineBufferSize,
+ unsigned int DPPOutputBufferPixels,
+ double DETBufferSizeInKByte,
+ unsigned int WritebackInterfaceBufferSize,
+ double DCFCLK,
+ double ReturnBW,
+ bool GPUVMEnable,
+ unsigned int dpte_group_bytes[],
+ unsigned int MetaChunkSize,
+ double UrgentLatency,
+ double ExtraLatency,
+ double WritebackLatency,
+ double WritebackChunkSize,
+ double SOCCLK,
+ double DRAMClockChangeLatency,
+ double SRExitTime,
+ double SREnterPlusExitTime,
+ double DCFCLKDeepSleep,
+ unsigned int DPPPerPlane[],
+ bool DCCEnable[],
+ double DPPCLK[],
+ double DETBufferSizeY[],
+ double DETBufferSizeC[],
+ unsigned int SwathHeightY[],
+ unsigned int SwathHeightC[],
+ unsigned int LBBitPerPixel[],
+ double SwathWidthY[],
+ double SwathWidthC[],
+ double HRatio[],
+ double HRatioChroma[],
+ unsigned int vtaps[],
+ unsigned int VTAPsChroma[],
+ double VRatio[],
+ double VRatioChroma[],
+ unsigned int HTotal[],
+ double PixelClock[],
+ unsigned int BlendingAndTiming[],
+ double BytePerPixelDETY[],
+ double BytePerPixelDETC[],
+ double DSTXAfterScaler[],
+ double DSTYAfterScaler[],
+ bool WritebackEnable[],
+ enum source_format_class WritebackPixelFormat[],
+ double WritebackDestinationWidth[],
+ double WritebackDestinationHeight[],
+ double WritebackSourceHeight[],
+ enum clock_change_support *DRAMClockChangeSupport,
+ double *UrgentWatermark,
+ double *WritebackUrgentWatermark,
+ double *DRAMClockChangeWatermark,
+ double *WritebackDRAMClockChangeWatermark,
+ double *StutterExitWatermark,
+ double *StutterEnterPlusExitWatermark,
+ double *MinActiveDRAMClockChangeLatencySupported);
+static void CalculateDCFCLKDeepSleep(
+ struct display_mode_lib *mode_lib,
+ unsigned int NumberOfActivePlanes,
+ int BytePerPixelY[],
+ int BytePerPixelC[],
+ double VRatio[],
+ double VRatioChroma[],
+ double SwathWidthY[],
+ double SwathWidthC[],
+ unsigned int DPPPerPlane[],
+ double HRatio[],
+ double HRatioChroma[],
+ double PixelClock[],
+ double PSCL_THROUGHPUT[],
+ double PSCL_THROUGHPUT_CHROMA[],
+ double DPPCLK[],
+ double ReadBandwidthLuma[],
+ double ReadBandwidthChroma[],
+ int ReturnBusWidth,
+ double *DCFCLKDeepSleep);
+static void CalculateUrgentBurstFactor(
+ long swath_width_luma_ub,
+ long swath_width_chroma_ub,
+ unsigned int DETBufferSizeInKByte,
+ unsigned int SwathHeightY,
+ unsigned int SwathHeightC,
+ double LineTime,
+ double UrgentLatency,
+ double CursorBufferSize,
+ unsigned int CursorWidth,
+ unsigned int CursorBPP,
+ double VRatio,
+ double VRatioC,
+ double BytePerPixelInDETY,
+ double BytePerPixelInDETC,
+ double DETBufferSizeY,
+ double DETBufferSizeC,
+ double *UrgentBurstFactorCursor,
+ double *UrgentBurstFactorLuma,
+ double *UrgentBurstFactorChroma,
+ bool *NotEnoughUrgentLatencyHiding);
+
+static void UseMinimumDCFCLK(
+ struct display_mode_lib *mode_lib,
+ int MaxInterDCNTileRepeaters,
+ int MaxPrefetchMode,
+ double FinalDRAMClockChangeLatency,
+ double SREnterPlusExitTime,
+ int ReturnBusWidth,
+ int RoundTripPingLatencyCycles,
+ int ReorderingBytes,
+ int PixelChunkSizeInKByte,
+ int MetaChunkSize,
+ bool GPUVMEnable,
+ int GPUVMMaxPageTableLevels,
+ bool HostVMEnable,
+ int NumberOfActivePlanes,
+ double HostVMMinPageSize,
+ int HostVMMaxNonCachedPageTableLevels,
+ bool DynamicMetadataVMEnabled,
+ enum immediate_flip_requirement ImmediateFlipRequirement,
+ bool ProgressiveToInterlaceUnitInOPP,
+ double MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly,
+ int VTotal[],
+ int VActive[],
+ int DynamicMetadataTransmittedBytes[],
+ int DynamicMetadataLinesBeforeActiveRequired[],
+ bool Interlace[],
+ double RequiredDPPCLK[][2][DC__NUM_DPP__MAX],
+ double RequiredDISPCLK[][2],
+ double UrgLatency[],
+ unsigned int NoOfDPP[][2][DC__NUM_DPP__MAX],
+ double ProjectedDCFCLKDeepSleep[][2],
+ double MaximumVStartup[][2][DC__NUM_DPP__MAX],
+ double TotalVActivePixelBandwidth[][2],
+ double TotalVActiveCursorBandwidth[][2],
+ double TotalMetaRowBandwidth[][2],
+ double TotalDPTERowBandwidth[][2],
+ unsigned int TotalNumberOfActiveDPP[][2],
+ unsigned int TotalNumberOfDCCActiveDPP[][2],
+ int dpte_group_bytes[],
+ double PrefetchLinesY[][2][DC__NUM_DPP__MAX],
+ double PrefetchLinesC[][2][DC__NUM_DPP__MAX],
+ int swath_width_luma_ub_all_states[][2][DC__NUM_DPP__MAX],
+ int swath_width_chroma_ub_all_states[][2][DC__NUM_DPP__MAX],
+ int BytePerPixelY[],
+ int BytePerPixelC[],
+ int HTotal[],
+ double PixelClock[],
+ double PDEAndMetaPTEBytesPerFrame[][2][DC__NUM_DPP__MAX],
+ double DPTEBytesPerRow[][2][DC__NUM_DPP__MAX],
+ double MetaRowBytes[][2][DC__NUM_DPP__MAX],
+ bool DynamicMetadataEnable[],
+ double VActivePixelBandwidth[][2][DC__NUM_DPP__MAX],
+ double VActiveCursorBandwidth[][2][DC__NUM_DPP__MAX],
+ double ReadBandwidthLuma[],
+ double ReadBandwidthChroma[],
+ double DCFCLKPerState[],
+ double DCFCLKState[][2]);
+static void CalculatePixelDeliveryTimes(
+ unsigned int NumberOfActivePlanes,
+ double VRatio[],
+ double VRatioChroma[],
+ double VRatioPrefetchY[],
+ double VRatioPrefetchC[],
+ unsigned int swath_width_luma_ub[],
+ unsigned int swath_width_chroma_ub[],
+ unsigned int DPPPerPlane[],
+ double HRatio[],
+ double HRatioChroma[],
+ double PixelClock[],
+ double PSCL_THROUGHPUT[],
+ double PSCL_THROUGHPUT_CHROMA[],
+ double DPPCLK[],
+ int BytePerPixelC[],
+ enum scan_direction_class SourceScan[],
+ unsigned int NumberOfCursors[],
+ unsigned int CursorWidth[][2],
+ unsigned int CursorBPP[][2],
+ unsigned int BlockWidth256BytesY[],
+ unsigned int BlockHeight256BytesY[],
+ unsigned int BlockWidth256BytesC[],
+ unsigned int BlockHeight256BytesC[],
+ double DisplayPipeLineDeliveryTimeLuma[],
+ double DisplayPipeLineDeliveryTimeChroma[],
+ double DisplayPipeLineDeliveryTimeLumaPrefetch[],
+ double DisplayPipeLineDeliveryTimeChromaPrefetch[],
+ double DisplayPipeRequestDeliveryTimeLuma[],
+ double DisplayPipeRequestDeliveryTimeChroma[],
+ double DisplayPipeRequestDeliveryTimeLumaPrefetch[],
+ double DisplayPipeRequestDeliveryTimeChromaPrefetch[],
+ double CursorRequestDeliveryTime[],
+ double CursorRequestDeliveryTimePrefetch[]);
+
+static void CalculateMetaAndPTETimes(
+ int NumberOfActivePlanes,
+ bool GPUVMEnable,
+ int MetaChunkSize,
+ int MinMetaChunkSizeBytes,
+ int HTotal[],
+ double VRatio[],
+ double VRatioChroma[],
+ double DestinationLinesToRequestRowInVBlank[],
+ double DestinationLinesToRequestRowInImmediateFlip[],
+ bool DCCEnable[],
+ double PixelClock[],
+ int BytePerPixelY[],
+ int BytePerPixelC[],
+ enum scan_direction_class SourceScan[],
+ int dpte_row_height[],
+ int dpte_row_height_chroma[],
+ int meta_row_width[],
+ int meta_row_width_chroma[],
+ int meta_row_height[],
+ int meta_row_height_chroma[],
+ int meta_req_width[],
+ int meta_req_width_chroma[],
+ int meta_req_height[],
+ int meta_req_height_chroma[],
+ int dpte_group_bytes[],
+ int PTERequestSizeY[],
+ int PTERequestSizeC[],
+ int PixelPTEReqWidthY[],
+ int PixelPTEReqHeightY[],
+ int PixelPTEReqWidthC[],
+ int PixelPTEReqHeightC[],
+ int dpte_row_width_luma_ub[],
+ int dpte_row_width_chroma_ub[],
+ double DST_Y_PER_PTE_ROW_NOM_L[],
+ double DST_Y_PER_PTE_ROW_NOM_C[],
+ double DST_Y_PER_META_ROW_NOM_L[],
+ double DST_Y_PER_META_ROW_NOM_C[],
+ double TimePerMetaChunkNominal[],
+ double TimePerChromaMetaChunkNominal[],
+ double TimePerMetaChunkVBlank[],
+ double TimePerChromaMetaChunkVBlank[],
+ double TimePerMetaChunkFlip[],
+ double TimePerChromaMetaChunkFlip[],
+ double time_per_pte_group_nom_luma[],
+ double time_per_pte_group_vblank_luma[],
+ double time_per_pte_group_flip_luma[],
+ double time_per_pte_group_nom_chroma[],
+ double time_per_pte_group_vblank_chroma[],
+ double time_per_pte_group_flip_chroma[]);
+
+static void CalculateVMGroupAndRequestTimes(
+ unsigned int NumberOfActivePlanes,
+ bool GPUVMEnable,
+ unsigned int GPUVMMaxPageTableLevels,
+ unsigned int HTotal[],
+ int BytePerPixelC[],
+ double DestinationLinesToRequestVMInVBlank[],
+ double DestinationLinesToRequestVMInImmediateFlip[],
+ bool DCCEnable[],
+ double PixelClock[],
+ int dpte_row_width_luma_ub[],
+ int dpte_row_width_chroma_ub[],
+ int vm_group_bytes[],
+ unsigned int dpde0_bytes_per_frame_ub_l[],
+ unsigned int dpde0_bytes_per_frame_ub_c[],
+ int meta_pte_bytes_per_frame_ub_l[],
+ int meta_pte_bytes_per_frame_ub_c[],
+ double TimePerVMGroupVBlank[],
+ double TimePerVMGroupFlip[],
+ double TimePerVMRequestVBlank[],
+ double TimePerVMRequestFlip[]);
+
+static void CalculateStutterEfficiency(
+ int NumberOfActivePlanes,
+ long ROBBufferSizeInKByte,
+ double TotalDataReadBandwidth,
+ double DCFCLK,
+ double ReturnBW,
+ double SRExitTime,
+ bool SynchronizedVBlank,
+ int DPPPerPlane[],
+ double DETBufferSizeY[],
+ int BytePerPixelY[],
+ double BytePerPixelDETY[],
+ double SwathWidthY[],
+ int SwathHeightY[],
+ int SwathHeightC[],
+ double DCCRateLuma[],
+ double DCCRateChroma[],
+ int HTotal[],
+ int VTotal[],
+ double PixelClock[],
+ double VRatio[],
+ enum scan_direction_class SourceScan[],
+ int BlockHeight256BytesY[],
+ int BlockWidth256BytesY[],
+ int BlockHeight256BytesC[],
+ int BlockWidth256BytesC[],
+ int DCCYMaxUncompressedBlock[],
+ int DCCCMaxUncompressedBlock[],
+ int VActive[],
+ bool DCCEnable[],
+ bool WritebackEnable[],
+ double ReadBandwidthPlaneLuma[],
+ double ReadBandwidthPlaneChroma[],
+ double meta_row_bw[],
+ double dpte_row_bw[],
+ double *StutterEfficiencyNotIncludingVBlank,
+ double *StutterEfficiency);
+
+static void CalculateSwathAndDETConfiguration(
+ bool ForceSingleDPP,
+ int NumberOfActivePlanes,
+ long DETBufferSizeInKByte,
+ double MaximumSwathWidthLuma[],
+ double MaximumSwathWidthChroma[],
+ enum scan_direction_class SourceScan[],
+ enum source_format_class SourcePixelFormat[],
+ enum dm_swizzle_mode SurfaceTiling[],
+ int ViewportWidth[],
+ int ViewportHeight[],
+ int SurfaceWidthY[],
+ int SurfaceWidthC[],
+ int SurfaceHeightY[],
+ int SurfaceHeightC[],
+ int Read256BytesBlockHeightY[],
+ int Read256BytesBlockHeightC[],
+ int Read256BytesBlockWidthY[],
+ int Read256BytesBlockWidthC[],
+ enum odm_combine_mode ODMCombineEnabled[],
+ int BlendingAndTiming[],
+ int BytePerPixY[],
+ int BytePerPixC[],
+ double BytePerPixDETY[],
+ double BytePerPixDETC[],
+ int HActive[],
+ double HRatio[],
+ double HRatioChroma[],
+ int DPPPerPlane[],
+ int swath_width_luma_ub[],
+ int swath_width_chroma_ub[],
+ double SwathWidth[],
+ double SwathWidthChroma[],
+ int SwathHeightY[],
+ int SwathHeightC[],
+ double DETBufferSizeY[],
+ double DETBufferSizeC[],
+ bool ViewportSizeSupportPerPlane[],
+ bool *ViewportSizeSupport);
+static void CalculateSwathWidth(
+ bool ForceSingleDPP,
+ int NumberOfActivePlanes,
+ enum source_format_class SourcePixelFormat[],
+ enum scan_direction_class SourceScan[],
+ unsigned int ViewportWidth[],
+ unsigned int ViewportHeight[],
+ unsigned int SurfaceWidthY[],
+ unsigned int SurfaceWidthC[],
+ unsigned int SurfaceHeightY[],
+ unsigned int SurfaceHeightC[],
+ enum odm_combine_mode ODMCombineEnabled[],
+ int BytePerPixY[],
+ int BytePerPixC[],
+ int Read256BytesBlockHeightY[],
+ int Read256BytesBlockHeightC[],
+ int Read256BytesBlockWidthY[],
+ int Read256BytesBlockWidthC[],
+ int BlendingAndTiming[],
+ unsigned int HActive[],
+ double HRatio[],
+ int DPPPerPlane[],
+ double SwathWidthSingleDPPY[],
+ double SwathWidthSingleDPPC[],
+ double SwathWidthY[],
+ double SwathWidthC[],
+ int MaximumSwathHeightY[],
+ int MaximumSwathHeightC[],
+ unsigned int swath_width_luma_ub[],
+ unsigned int swath_width_chroma_ub[]);
+static double CalculateExtraLatency(
+ long RoundTripPingLatencyCycles,
+ long ReorderingBytes,
+ double DCFCLK,
+ int TotalNumberOfActiveDPP,
+ int PixelChunkSizeInKByte,
+ int TotalNumberOfDCCActiveDPP,
+ int MetaChunkSize,
+ double ReturnBW,
+ bool GPUVMEnable,
+ bool HostVMEnable,
+ int NumberOfActivePlanes,
+ int NumberOfDPP[],
+ int dpte_group_bytes[],
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ double HostVMMinPageSize,
+ int HostVMMaxNonCachedPageTableLevels);
+static double CalculateExtraLatencyBytes(
+ long ReorderingBytes,
+ int TotalNumberOfActiveDPP,
+ int PixelChunkSizeInKByte,
+ int TotalNumberOfDCCActiveDPP,
+ int MetaChunkSize,
+ bool GPUVMEnable,
+ bool HostVMEnable,
+ int NumberOfActivePlanes,
+ int NumberOfDPP[],
+ int dpte_group_bytes[],
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ double HostVMMinPageSize,
+ int HostVMMaxNonCachedPageTableLevels);
+static double CalculateUrgentLatency(
+ double UrgentLatencyPixelDataOnly,
+ double UrgentLatencyPixelMixedWithVMData,
+ double UrgentLatencyVMDataOnly,
+ bool DoUrgentLatencyAdjustment,
+ double UrgentLatencyAdjustmentFabricClockComponent,
+ double UrgentLatencyAdjustmentFabricClockReference,
+ double FabricClockSingle);
+
+static bool CalculateBytePerPixelAnd256BBlockSizes(
+ enum source_format_class SourcePixelFormat,
+ enum dm_swizzle_mode SurfaceTiling,
+ unsigned int *BytePerPixelY,
+ unsigned int *BytePerPixelC,
+ double *BytePerPixelDETY,
+ double *BytePerPixelDETC,
+ unsigned int *BlockHeight256BytesY,
+ unsigned int *BlockHeight256BytesC,
+ unsigned int *BlockWidth256BytesY,
+ unsigned int *BlockWidth256BytesC);
+
+void dml30_recalculate(struct display_mode_lib *mode_lib)
+{
+ ModeSupportAndSystemConfiguration(mode_lib);
+ PixelClockAdjustmentForProgressiveToInterlaceUnit(mode_lib);
+ DisplayPipeConfiguration(mode_lib);
+ DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(mode_lib);
+}
+
+static unsigned int dscceComputeDelay(
+ unsigned int bpc,
+ double BPP,
+ unsigned int sliceWidth,
+ unsigned int numSlices,
+ enum output_format_class pixelFormat,
+ enum output_encoder_class Output)
+{
+ // valid bpc = source bits per component in the set of {8, 10, 12}
+ // valid bpp = increments of 1/16 of a bit
+ // min = 6/7/8 in N420/N422/444, respectively
+ // max = such that compression is 1:1
+ //valid sliceWidth = number of pixels per slice line, must be less than or equal to 5184/numSlices (or 4096/numSlices in 420 mode)
+ //valid numSlices = number of slices in the horizontal direction per DSC engine in the set of {1, 2, 3, 4}
+ //valid pixelFormat = pixel/color format in the set of {:N444_RGB, :S422, :N422, :N420}
+
+ // fixed value
+ unsigned int rcModelSize = 8192;
+
+ // N422/N420 operate at 2 pixels per clock
+ unsigned int pixelsPerClock, lstall, D, initalXmitDelay, w, s, ix, wx, P, l0, a, ax, L,
+ Delay, pixels;
+
+ if (pixelFormat == dm_420)
+ pixelsPerClock = 2;
+ // all other modes operate at 1 pixel per clock
+ else if (pixelFormat == dm_444)
+ pixelsPerClock = 1;
+ else if (pixelFormat == dm_n422)
+ pixelsPerClock = 2;
+ else
+ pixelsPerClock = 1;
+
+ //initial transmit delay as per PPS
+ initalXmitDelay = dml_round(rcModelSize / 2.0 / BPP / pixelsPerClock);
+
+ //compute ssm delay
+ if (bpc == 8)
+ D = 81;
+ else if (bpc == 10)
+ D = 89;
+ else
+ D = 113;
+
+ //divide by pixel per cycle to compute slice width as seen by DSC
+ w = sliceWidth / pixelsPerClock;
+
+ //simple 422 mode has an additional cycle of delay
+ if (pixelFormat == dm_420 || pixelFormat == dm_444 || pixelFormat == dm_n422)
+ s = 0;
+ else
+ s = 1;
+
+ //main calculation for the dscce
+ ix = initalXmitDelay + 45;
+ wx = (w + 2) / 3;
+ P = 3 * wx - w;
+ l0 = ix / w;
+ a = ix + P * l0;
+ ax = (a + 2) / 3 + D + 6 + 1;
+ L = (ax + wx - 1) / wx;
+ if ((ix % w) == 0 && P != 0)
+ lstall = 1;
+ else
+ lstall = 0;
+ Delay = L * wx * (numSlices - 1) + ax + s + lstall + 22;
+
+ //dsc processes 3 pixel containers per cycle and a container can contain 1 or 2 pixels
+ pixels = Delay * 3 * pixelsPerClock;
+ return pixels;
+}
+
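+// Fixed pipeline latency of the DSC wrapper, accumulated stage by stage
+// (sfr, dsccif, input deserializer, CDC FIFOs, output serializer, sft).
+// The per-stage totals differ between 4:2:0, native 4:2:2 and the remaining
+// output formats.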
+static unsigned int dscComputeDelay(enum output_format_class pixelFormat, enum output_encoder_class Output)
+{
+ unsigned int Delay = 0;
+
+ if (pixelFormat == dm_420) {
+ // sfr
+ Delay = Delay + 2;
+ // dsccif
+ Delay = Delay + 0;
+ // dscc - input deserializer
+ Delay = Delay + 3;
+ // dscc gets pixels every other cycle
+ Delay = Delay + 2;
+ // dscc - input cdc fifo
+ Delay = Delay + 12;
+ // dscc gets pixels every other cycle
+ Delay = Delay + 13;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output cdc fifo
+ Delay = Delay + 7;
+ // dscc gets pixels every other cycle
+ Delay = Delay + 3;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output serializer
+ Delay = Delay + 1;
+ // sft
+ Delay = Delay + 1;
+ } else if (pixelFormat == dm_n422) {
+ // sfr
+ Delay = Delay + 2;
+ // dsccif
+ Delay = Delay + 1;
+ // dscc - input deserializer
+ Delay = Delay + 5;
+ // dscc - input cdc fifo
+ Delay = Delay + 25;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output cdc fifo
+ Delay = Delay + 10;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output serializer
+ Delay = Delay + 1;
+ // sft
+ Delay = Delay + 1;
+ } else {
+ // sfr
+ Delay = Delay + 2;
+ // dsccif
+ Delay = Delay + 0;
+ // dscc - input deserializer
+ Delay = Delay + 3;
+ // dscc - input cdc fifo
+ Delay = Delay + 12;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output cdc fifo
+ Delay = Delay + 7;
+ // dscc - output serializer
+ Delay = Delay + 1;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // sft
+ Delay = Delay + 1;
+ }
+
+ return Delay;
+}
+
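+// Builds the prefetch schedule for one pipe: derives the post-scaler start
+// position (DSTX/DSTYAfterScaler), the time budgets for fetching the meta/PDE
+// PTEs (Tvm), the first data/meta rows (Tr0) and the pixel swaths, and from
+// those the destination lines for prefetch, the prefetch bandwidth and the
+// luma/chroma VRatioPrefetch. Returns true when the schedule cannot be met
+// (MyError).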
+static bool CalculatePrefetchSchedule(
+ struct display_mode_lib *mode_lib,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ Pipe *myPipe,
+ unsigned int DSCDelay,
+ double DPPCLKDelaySubtotalPlusCNVCFormater,
+ double DPPCLKDelaySCL,
+ double DPPCLKDelaySCLLBOnly,
+ double DPPCLKDelayCNVCCursor,
+ double DISPCLKDelaySubtotal,
+ unsigned int DPP_RECOUT_WIDTH,
+ enum output_format_class OutputFormat,
+ unsigned int MaxInterDCNTileRepeaters,
+ unsigned int VStartup,
+ unsigned int MaxVStartup,
+ unsigned int GPUVMPageTableLevels,
+ bool GPUVMEnable,
+ bool HostVMEnable,
+ unsigned int HostVMMaxNonCachedPageTableLevels,
+ double HostVMMinPageSize,
+ bool DynamicMetadataEnable,
+ bool DynamicMetadataVMEnabled,
+ int DynamicMetadataLinesBeforeActiveRequired,
+ unsigned int DynamicMetadataTransmittedBytes,
+ double UrgentLatency,
+ double UrgentExtraLatency,
+ double TCalc,
+ unsigned int PDEAndMetaPTEBytesFrame,
+ unsigned int MetaRowByte,
+ unsigned int PixelPTEBytesPerRow,
+ double PrefetchSourceLinesY,
+ unsigned int SwathWidthY,
+ int BytePerPixelY,
+ double VInitPreFillY,
+ unsigned int MaxNumSwathY,
+ double PrefetchSourceLinesC,
+ unsigned int SwathWidthC,
+ int BytePerPixelC,
+ double VInitPreFillC,
+ unsigned int MaxNumSwathC,
+ long swath_width_luma_ub,
+ long swath_width_chroma_ub,
+ unsigned int SwathHeightY,
+ unsigned int SwathHeightC,
+ double TWait,
+ bool ProgressiveToInterlaceUnitInOPP,
+ double *DSTXAfterScaler,
+ double *DSTYAfterScaler,
+ double *DestinationLinesForPrefetch,
+ double *PrefetchBandwidth,
+ double *DestinationLinesToRequestVMInVBlank,
+ double *DestinationLinesToRequestRowInVBlank,
+ double *VRatioPrefetchY,
+ double *VRatioPrefetchC,
+ double *RequiredPrefetchPixDataBWLuma,
+ double *RequiredPrefetchPixDataBWChroma,
+ bool *NotEnoughTimeForDynamicMetadata,
+ double *Tno_bw,
+ double *prefetch_vmrow_bw,
+ double *Tdmdl_vm,
+ double *Tdmdl,
+ unsigned int *VUpdateOffsetPix,
+ double *VUpdateWidthPix,
+ double *VReadyOffsetPix)
+{
+ bool MyError = false;
+ unsigned int DPPCycles = 0, DISPCLKCycles = 0;
+ double DSTTotalPixelsAfterScaler = 0;
+ double LineTime = 0, Tsetup = 0;
+ double dst_y_prefetch_equ = 0;
+ double Tsw_oto = 0;
+ double prefetch_bw_oto = 0;
+ double Tvm_oto = 0;
+ double Tr0_oto = 0;
+ double Tvm_oto_lines = 0;
+ double Tr0_oto_lines = 0;
+ double dst_y_prefetch_oto = 0;
+ double TimeForFetchingMetaPTE = 0;
+ double TimeForFetchingRowInVBlank = 0;
+ double LinesToRequestPrefetchPixelData = 0;
+ double HostVMInefficiencyFactor = 0;
+ unsigned int HostVMDynamicLevelsTrips = 0;
+ double trip_to_mem = 0;
+ double Tvm_trips = 0;
+ double Tr0_trips = 0;
+ double Tvm_trips_rounded = 0;
+ double Tr0_trips_rounded = 0;
+ double Lsw_oto = 0;
+ double Tpre_rounded = 0;
+ double prefetch_bw_equ = 0;
+ double Tvm_equ = 0;
+ double Tr0_equ = 0;
+ double Tdmbf = 0;
+ double Tdmec = 0;
+ double Tdmsks = 0;
+
+ if (GPUVMEnable == true && HostVMEnable == true) {
+ HostVMInefficiencyFactor = PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly;
+ HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+ } else {
+ HostVMInefficiencyFactor = 1;
+ HostVMDynamicLevelsTrips = 0;
+ }
+
+ CalculateDynamicMetadataParameters(
+ MaxInterDCNTileRepeaters,
+ myPipe->DPPCLK,
+ myPipe->DISPCLK,
+ myPipe->DCFCLKDeepSleep,
+ myPipe->PixelClock,
+ myPipe->HTotal,
+ myPipe->VBlank,
+ DynamicMetadataTransmittedBytes,
+ DynamicMetadataLinesBeforeActiveRequired,
+ myPipe->InterlaceEnable,
+ ProgressiveToInterlaceUnitInOPP,
+ &Tsetup,
+ &Tdmbf,
+ &Tdmec,
+ &Tdmsks);
+
+ LineTime = myPipe->HTotal / myPipe->PixelClock;
+ trip_to_mem = UrgentLatency;
+ Tvm_trips = UrgentExtraLatency + trip_to_mem * (GPUVMPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
+
+ if (DynamicMetadataVMEnabled == true && GPUVMEnable == true) {
+ *Tdmdl = TWait + Tvm_trips + trip_to_mem;
+ } else {
+ *Tdmdl = TWait + UrgentExtraLatency;
+ }
+
+ if (DynamicMetadataEnable == true) {
+ if (VStartup * LineTime < Tsetup + *Tdmdl + Tdmbf + Tdmec + Tdmsks) {
+ *NotEnoughTimeForDynamicMetadata = true;
+ dml_print("DML: Not Enough Time for Dynamic Meta!\n");
+ dml_print("DML: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", Tdmbf);
+ dml_print("DML: Tdmec: %fus - time dio takes to transfer dmd\n", Tdmec);
+ dml_print("DML: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", Tdmsks);
+ dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", *Tdmdl);
+ } else {
+ *NotEnoughTimeForDynamicMetadata = false;
+ }
+ } else {
+ *NotEnoughTimeForDynamicMetadata = false;
+ }
+
+ *Tdmdl_vm = (DynamicMetadataEnable == true && DynamicMetadataVMEnabled == true && GPUVMEnable == true ? TWait + Tvm_trips : 0);
+
+ if (myPipe->ScalerEnabled)
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
+ else
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
+
+ DPPCycles = DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
+
+ DISPCLKCycles = DISPCLKDelaySubtotal;
+
+ if (myPipe->DPPCLK == 0.0 || myPipe->DISPCLK == 0.0)
+ return true;
+
+ *DSTXAfterScaler = DPPCycles * myPipe->PixelClock / myPipe->DPPCLK + DISPCLKCycles * myPipe->PixelClock / myPipe->DISPCLK
+ + DSCDelay;
+
+ *DSTXAfterScaler = *DSTXAfterScaler + ((myPipe->ODMCombineEnabled)?18:0) + (myPipe->DPPPerPlane - 1) * DPP_RECOUT_WIDTH;
+
+ if (OutputFormat == dm_420 || (myPipe->InterlaceEnable && ProgressiveToInterlaceUnitInOPP))
+ *DSTYAfterScaler = 1;
+ else
+ *DSTYAfterScaler = 0;
+
+ DSTTotalPixelsAfterScaler = *DSTYAfterScaler * myPipe->HTotal + *DSTXAfterScaler;
+ *DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
+ *DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
+
+ MyError = false;
+
+
+ Tr0_trips = trip_to_mem * (HostVMDynamicLevelsTrips + 1);
+ Tvm_trips_rounded = dml_ceil(4.0 * Tvm_trips / LineTime, 1) / 4 * LineTime;
+ Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1) / 4 * LineTime;
+
+ if (GPUVMEnable) {
+ if (GPUVMPageTableLevels >= 3) {
+ *Tno_bw = UrgentExtraLatency + trip_to_mem * ((GPUVMPageTableLevels - 2) - 1);
+ } else
+ *Tno_bw = 0;
+ } else if (!myPipe->DCCEnable)
+ *Tno_bw = LineTime;
+ else
+ *Tno_bw = LineTime / 4;
+
+ dst_y_prefetch_equ = VStartup - (Tsetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime
+ - (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
+
+ Lsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC);
+ Tsw_oto = Lsw_oto * LineTime;
+
+ prefetch_bw_oto = (PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * BytePerPixelC) / Tsw_oto;
+
+ if (GPUVMEnable == true) {
+ Tvm_oto = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
+ Tvm_trips,
+ LineTime / 4.0);
+ } else
+ Tvm_oto = LineTime / 4.0;
+
+ if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
+ Tr0_oto = dml_max3(
+ (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto,
+ LineTime - Tvm_oto, LineTime / 4);
+ } else
+ Tr0_oto = (LineTime - Tvm_oto) / 2.0;
+
+ Tvm_oto_lines = dml_ceil(4.0 * Tvm_oto / LineTime, 1) / 4.0;
+ Tr0_oto_lines = dml_ceil(4.0 * Tr0_oto / LineTime, 1) / 4.0;
+ dst_y_prefetch_oto = Tvm_oto_lines + 2 * Tr0_oto_lines + Lsw_oto;
+
+ dst_y_prefetch_equ = dml_floor(4.0 * (dst_y_prefetch_equ + 0.125), 1) / 4.0;
+ Tpre_rounded = dst_y_prefetch_equ * LineTime;
+
+ dml_print("DML: dst_y_prefetch_oto: %f\n", dst_y_prefetch_oto);
+ dml_print("DML: dst_y_prefetch_equ: %f\n", dst_y_prefetch_equ);
+
+ dml_print("DML: LineTime: %f\n", LineTime);
+ dml_print("DML: VStartup: %d\n", VStartup);
+ dml_print("DML: Tvstartup: %fus - time between vstartup and first pixel of active\n", VStartup * LineTime);
+ dml_print("DML: Tsetup: %fus - time from vstartup to vready\n", Tsetup);
+ dml_print("DML: TCalc: %fus - time for calculations in dchub starting at vready\n", TCalc);
+ dml_print("DML: TWait: %fus - time for fabric to become ready max(pstate exit,cstate enter/exit, urgent latency) after TCalc\n", TWait);
+ dml_print("DML: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", Tdmbf);
+ dml_print("DML: Tdmec: %fus - time dio takes to transfer dmd\n", Tdmec);
+ dml_print("DML: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", Tdmsks);
+ dml_print("DML: Tdmdl_vm: %fus - time for vm stages of dmd \n", *Tdmdl_vm);
+ dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", *Tdmdl);
+ dml_print("DML: dst_x_after_scl: %f pixels - number of pixel clocks pipeline and buffer delay after scaler \n", *DSTXAfterScaler);
+ dml_print("DML: dst_y_after_scl: %d lines - number of lines of pipeline and buffer delay after scaler \n", (int)*DSTYAfterScaler);
+
+ *PrefetchBandwidth = 0;
+ *DestinationLinesToRequestVMInVBlank = 0;
+ *DestinationLinesToRequestRowInVBlank = 0;
+ *VRatioPrefetchY = 0;
+ *VRatioPrefetchC = 0;
+ *RequiredPrefetchPixDataBWLuma = 0;
+ if (dst_y_prefetch_equ > 1) {
+ double PrefetchBandwidth1 = 0;
+ double PrefetchBandwidth2 = 0;
+ double PrefetchBandwidth3 = 0;
+ double PrefetchBandwidth4 = 0;
+
+ if (Tpre_rounded - *Tno_bw > 0)
+ PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
+ + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
+ + PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY
+ + PrefetchSourceLinesC * swath_width_chroma_ub * BytePerPixelC)
+ / (Tpre_rounded - *Tno_bw);
+ else
+ PrefetchBandwidth1 = 0;
+
+ if (VStartup == MaxVStartup && (PrefetchBandwidth1 > 4 * prefetch_bw_oto) && (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - *Tno_bw) > 0) {
+ PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor) / (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - *Tno_bw);
+ }
+
+ if (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded > 0)
+ PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame *
+ HostVMInefficiencyFactor + PrefetchSourceLinesY *
+ swath_width_luma_ub * BytePerPixelY +
+ PrefetchSourceLinesC * swath_width_chroma_ub *
+ BytePerPixelC) /
+ (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded);
+ else
+ PrefetchBandwidth2 = 0;
+
+ if (Tpre_rounded - Tvm_trips_rounded > 0)
+ PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow *
+ HostVMInefficiencyFactor + PrefetchSourceLinesY *
+ swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC *
+ swath_width_chroma_ub * BytePerPixelC) / (Tpre_rounded -
+ Tvm_trips_rounded);
+ else
+ PrefetchBandwidth3 = 0;
+
+ if (VStartup == MaxVStartup && (PrefetchBandwidth3 > 4 * prefetch_bw_oto) && Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - Tvm_trips_rounded > 0) {
+ PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor) / (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - Tvm_trips_rounded);
+ }
+
+ if (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded > 0)
+ PrefetchBandwidth4 = (PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * BytePerPixelC)
+ / (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded);
+ else
+ PrefetchBandwidth4 = 0;
+
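+ // Pick the candidate bandwidth whose implied VM and row fetch times are
+ // consistent with the assumption used to derive it: case 1 assumes both
+ // fetches are bandwidth limited, case 2 a bandwidth-limited VM fetch with
+ // trip-limited row fetches, case 3 the reverse, and case 4 (the fallback)
+ // both trip limited.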
+ {
+ bool Case1OK;
+ bool Case2OK;
+ bool Case3OK;
+
+ if (PrefetchBandwidth1 > 0) {
+ if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth1
+ >= Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth1 >= Tr0_trips_rounded) {
+ Case1OK = true;
+ } else {
+ Case1OK = false;
+ }
+ } else {
+ Case1OK = false;
+ }
+
+ if (PrefetchBandwidth2 > 0) {
+ if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth2
+ >= Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth2 < Tr0_trips_rounded) {
+ Case2OK = true;
+ } else {
+ Case2OK = false;
+ }
+ } else {
+ Case2OK = false;
+ }
+
+ if (PrefetchBandwidth3 > 0) {
+ if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth3
+ < Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth3 >= Tr0_trips_rounded) {
+ Case3OK = true;
+ } else {
+ Case3OK = false;
+ }
+ } else {
+ Case3OK = false;
+ }
+
+ if (Case1OK) {
+ prefetch_bw_equ = PrefetchBandwidth1;
+ } else if (Case2OK) {
+ prefetch_bw_equ = PrefetchBandwidth2;
+ } else if (Case3OK) {
+ prefetch_bw_equ = PrefetchBandwidth3;
+ } else {
+ prefetch_bw_equ = PrefetchBandwidth4;
+ }
+
+ dml_print("DML: prefetch_bw_equ: %f\n", prefetch_bw_equ);
+
+ if (prefetch_bw_equ > 0) {
+ if (GPUVMEnable == true) {
+ Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_equ, Tvm_trips, LineTime / 4);
+ } else {
+ Tvm_equ = LineTime / 4;
+ }
+
+ if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
+ Tr0_equ = dml_max4(
+ (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_equ,
+ Tr0_trips,
+ (LineTime - Tvm_equ) / 2,
+ LineTime / 4);
+ } else {
+ Tr0_equ = (LineTime - Tvm_equ) / 2;
+ }
+ } else {
+ Tvm_equ = 0;
+ Tr0_equ = 0;
+ dml_print("DML: prefetch_bw_equ equals 0! %s:%d\n", __FILE__, __LINE__);
+ }
+ }
+
+ if (dst_y_prefetch_oto < dst_y_prefetch_equ) {
+ *DestinationLinesForPrefetch = dst_y_prefetch_oto;
+ TimeForFetchingMetaPTE = Tvm_oto;
+ TimeForFetchingRowInVBlank = Tr0_oto;
+ *PrefetchBandwidth = prefetch_bw_oto;
+ } else {
+ *DestinationLinesForPrefetch = dst_y_prefetch_equ;
+ TimeForFetchingMetaPTE = Tvm_equ;
+ TimeForFetchingRowInVBlank = Tr0_equ;
+ *PrefetchBandwidth = prefetch_bw_equ;
+ }
+
+ *DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;
+
+ *DestinationLinesToRequestRowInVBlank = dml_ceil(4.0 * TimeForFetchingRowInVBlank / LineTime, 1.0) / 4.0;
+
+
+ LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch - *DestinationLinesToRequestVMInVBlank
+ - 2 * *DestinationLinesToRequestRowInVBlank;
+
+ if (LinesToRequestPrefetchPixelData > 0 && prefetch_bw_equ > 0) {
+
+ *VRatioPrefetchY = (double) PrefetchSourceLinesY
+ / LinesToRequestPrefetchPixelData;
+ *VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
+ if ((SwathHeightY > 4) && (VInitPreFillY > 3)) {
+ if (LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
+ *VRatioPrefetchY = dml_max((double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData,
+ (double) MaxNumSwathY * SwathHeightY / (LinesToRequestPrefetchPixelData - (VInitPreFillY - 3.0) / 2.0));
+ *VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
+ } else {
+ MyError = true;
+ dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
+ *VRatioPrefetchY = 0;
+ }
+ }
+
+ *VRatioPrefetchC = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData;
+ *VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
+
+ if ((SwathHeightC > 4)) {
+ if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
+ *VRatioPrefetchC = dml_max(*VRatioPrefetchC,
+ (double) MaxNumSwathC * SwathHeightC / (LinesToRequestPrefetchPixelData - (VInitPreFillC - 3.0) / 2.0));
+ *VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
+ } else {
+ MyError = true;
+ dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
+ *VRatioPrefetchC = 0;
+ }
+ }
+
+ *RequiredPrefetchPixDataBWLuma = (double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData * BytePerPixelY * swath_width_luma_ub / LineTime;
+ *RequiredPrefetchPixDataBWChroma = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData * BytePerPixelC * swath_width_chroma_ub / LineTime;
+ } else {
+ MyError = true;
+ dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
+ dml_print("DML: LinesToRequestPrefetchPixelData: %f, should be > 0\n", LinesToRequestPrefetchPixelData);
+ *VRatioPrefetchY = 0;
+ *VRatioPrefetchC = 0;
+ *RequiredPrefetchPixDataBWLuma = 0;
+ *RequiredPrefetchPixDataBWChroma = 0;
+ }
+
+ dml_print("DML: Tpre: %fus - sum of tim to request meta pte, 2 x data pte + meta data, swaths\n", (double)LinesToRequestPrefetchPixelData * LineTime + 2.0*TimeForFetchingRowInVBlank + TimeForFetchingMetaPTE);
+ dml_print("DML: Tvm: %fus - time to fetch page tables for meta surface\n", TimeForFetchingMetaPTE);
+ dml_print("DML: Tr0: %fus - time to fetch first row of data pagetables and first row of meta data (done in parallel)\n", TimeForFetchingRowInVBlank);
+ dml_print("DML: Tr1: %fus - time to fetch second row of data pagetables and second row of meta data (done in parallel)\n", TimeForFetchingRowInVBlank);
+ dml_print("DML: Tsw: %fus = time to fetch enough pixel data and cursor data to feed the scalers init position and detile\n", (double)LinesToRequestPrefetchPixelData * LineTime);
+ dml_print("DML: To: %fus - time for propagation from scaler to optc\n", (*DSTYAfterScaler + ((*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime);
+ dml_print("DML: Tvstartup - Tsetup - Tcalc - Twait - Tpre - To > 0\n");
+ dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * LineTime - TimeForFetchingMetaPTE - 2 * TimeForFetchingRowInVBlank - (*DSTYAfterScaler + ((*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime - TWait - TCalc - Tsetup);
+ dml_print("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %d\n", PixelPTEBytesPerRow);
+
+ } else {
+ MyError = true;
+ dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
+ }
+
+ {
+ double prefetch_vm_bw = 0;
+ double prefetch_row_bw = 0;
+
+ if (PDEAndMetaPTEBytesFrame == 0) {
+ prefetch_vm_bw = 0;
+ } else if (*DestinationLinesToRequestVMInVBlank > 0) {
+ prefetch_vm_bw = PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInVBlank * LineTime);
+ } else {
+ prefetch_vm_bw = 0;
+ MyError = true;
+ dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
+ }
+ if (MetaRowByte + PixelPTEBytesPerRow == 0) {
+ prefetch_row_bw = 0;
+ } else if (*DestinationLinesToRequestRowInVBlank > 0) {
+ prefetch_row_bw = (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInVBlank * LineTime);
+ } else {
+ prefetch_row_bw = 0;
+ MyError = true;
+ dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
+ }
+
+ *prefetch_vmrow_bw = dml_max(prefetch_vm_bw, prefetch_row_bw);
+ }
+
+ if (MyError) {
+ *PrefetchBandwidth = 0;
+ TimeForFetchingMetaPTE = 0;
+ TimeForFetchingRowInVBlank = 0;
+ *DestinationLinesToRequestVMInVBlank = 0;
+ *DestinationLinesToRequestRowInVBlank = 0;
+ *DestinationLinesForPrefetch = 0;
+ LinesToRequestPrefetchPixelData = 0;
+ *VRatioPrefetchY = 0;
+ *VRatioPrefetchC = 0;
+ *RequiredPrefetchPixDataBWLuma = 0;
+ *RequiredPrefetchPixDataBWChroma = 0;
+ }
+
+ return MyError;
+}
+
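+// The DFS can only produce clocks of the form VCOSpeed * 4 / N for an integer
+// divider N, so these helpers snap a requested clock to the closest achievable
+// frequency at or above (Up) respectively at or below (Down) the request.
+// For example (illustrative numbers), with VCOSpeed = 3600 and Clock = 1000:
+// Up yields 14400 / 14 = ~1028.6, Down yields 14400 / 15 = 960.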
+static double RoundToDFSGranularityUp(double Clock, double VCOSpeed)
+{
+ return VCOSpeed * 4 / dml_floor(VCOSpeed * 4 / Clock, 1);
+}
+
+static double RoundToDFSGranularityDown(double Clock, double VCOSpeed)
+{
+ return VCOSpeed * 4 / dml_ceil(VCOSpeed * 4.0 / Clock, 1);
+}
+
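+// Derives the per-plane DCC request configuration: whether luma/chroma can use
+// full 256-byte requests or must drop to 128-byte requests (based on whether
+// two full swaths still fit in the detile buffer for the relevant scan
+// direction), and the resulting maximum uncompressed/compressed block and
+// independent block sizes.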
+static void CalculateDCCConfiguration(
+ bool DCCEnabled,
+ bool DCCProgrammingAssumesScanDirectionUnknown,
+ enum source_format_class SourcePixelFormat,
+ unsigned int SurfaceWidthLuma,
+ unsigned int SurfaceWidthChroma,
+ unsigned int SurfaceHeightLuma,
+ unsigned int SurfaceHeightChroma,
+ double DETBufferSize,
+ unsigned int RequestHeight256ByteLuma,
+ unsigned int RequestHeight256ByteChroma,
+ enum dm_swizzle_mode TilingFormat,
+ unsigned int BytePerPixelY,
+ unsigned int BytePerPixelC,
+ double BytePerPixelDETY,
+ double BytePerPixelDETC,
+ enum scan_direction_class ScanOrientation,
+ unsigned int *MaxUncompressedBlockLuma,
+ unsigned int *MaxUncompressedBlockChroma,
+ unsigned int *MaxCompressedBlockLuma,
+ unsigned int *MaxCompressedBlockChroma,
+ unsigned int *IndependentBlockLuma,
+ unsigned int *IndependentBlockChroma)
+{
+ int yuv420 = 0;
+ int horz_div_l = 0;
+ int horz_div_c = 0;
+ int vert_div_l = 0;
+ int vert_div_c = 0;
+
+ int req128_horz_wc_l = 0;
+ int req128_horz_wc_c = 0;
+ int req128_vert_wc_l = 0;
+ int req128_vert_wc_c = 0;
+ int segment_order_horz_contiguous_luma = 0;
+ int segment_order_horz_contiguous_chroma = 0;
+ int segment_order_vert_contiguous_luma = 0;
+ int segment_order_vert_contiguous_chroma = 0;
+
+ long full_swath_bytes_horz_wc_l = 0;
+ long full_swath_bytes_horz_wc_c = 0;
+ long full_swath_bytes_vert_wc_l = 0;
+ long full_swath_bytes_vert_wc_c = 0;
+
+ long swath_buf_size = 0;
+ double detile_buf_vp_horz_limit = 0;
+ double detile_buf_vp_vert_limit = 0;
+
+ long MAS_vp_horz_limit = 0;
+ long MAS_vp_vert_limit = 0;
+ long max_vp_horz_width = 0;
+ long max_vp_vert_height = 0;
+ long eff_surf_width_l = 0;
+ long eff_surf_width_c = 0;
+ long eff_surf_height_l = 0;
+ long eff_surf_height_c = 0;
+
+ typedef enum {
+ REQ_256Bytes,
+ REQ_128BytesNonContiguous,
+ REQ_128BytesContiguous,
+ REQ_NA
+ } RequestType;
+
+ RequestType RequestLuma;
+ RequestType RequestChroma;
+
+ yuv420 = ((SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_420_12) ? 1 : 0);
+ horz_div_l = 1;
+ horz_div_c = 1;
+ vert_div_l = 1;
+ vert_div_c = 1;
+
+ if (BytePerPixelY == 1)
+ vert_div_l = 0;
+ if (BytePerPixelC == 1)
+ vert_div_c = 0;
+ if (BytePerPixelY == 8
+ && (TilingFormat == dm_sw_64kb_s || TilingFormat == dm_sw_64kb_s_t
+ || TilingFormat == dm_sw_64kb_s_x))
+ horz_div_l = 0;
+ if (BytePerPixelC == 8
+ && (TilingFormat == dm_sw_64kb_s || TilingFormat == dm_sw_64kb_s_t
+ || TilingFormat == dm_sw_64kb_s_x))
+ horz_div_c = 0;
+
+ if (BytePerPixelC == 0) {
+ swath_buf_size = DETBufferSize / 2 - 2 * 256;
+ detile_buf_vp_horz_limit = (double) swath_buf_size
+ / ((double) RequestHeight256ByteLuma * BytePerPixelY
+ / (1 + horz_div_l));
+ detile_buf_vp_vert_limit = (double) swath_buf_size
+ / (256.0 / RequestHeight256ByteLuma / (1 + vert_div_l));
+ } else {
+ swath_buf_size = DETBufferSize / 2 - 2 * 2 * 256;
+ detile_buf_vp_horz_limit = (double) swath_buf_size
+ / ((double) RequestHeight256ByteLuma * BytePerPixelY
+ / (1 + horz_div_l)
+ + (double) RequestHeight256ByteChroma
+ * BytePerPixelC / (1 + horz_div_c)
+ / (1 + yuv420));
+ detile_buf_vp_vert_limit = (double) swath_buf_size
+ / (256.0 / RequestHeight256ByteLuma / (1 + vert_div_l)
+ + 256.0 / RequestHeight256ByteChroma
+ / (1 + vert_div_c) / (1 + yuv420));
+ }
+
+ if (SourcePixelFormat == dm_420_10) {
+ detile_buf_vp_horz_limit = 1.5 * detile_buf_vp_horz_limit;
+ detile_buf_vp_vert_limit = 1.5 * detile_buf_vp_vert_limit;
+ }
+
+ detile_buf_vp_horz_limit = dml_floor(detile_buf_vp_horz_limit - 1, 16);
+ detile_buf_vp_vert_limit = dml_floor(detile_buf_vp_vert_limit - 1, 16);
+
+ MAS_vp_horz_limit = 5760;
+ MAS_vp_vert_limit = (BytePerPixelC > 0 ? 2880 : 5760);
+ max_vp_horz_width = dml_min((double) MAS_vp_horz_limit, detile_buf_vp_horz_limit);
+ max_vp_vert_height = dml_min((double) MAS_vp_vert_limit, detile_buf_vp_vert_limit);
+ eff_surf_width_l =
+ (SurfaceWidthLuma > max_vp_horz_width ? max_vp_horz_width : SurfaceWidthLuma);
+ eff_surf_width_c = eff_surf_width_l / (1 + yuv420);
+ eff_surf_height_l = (
+ SurfaceHeightLuma > max_vp_vert_height ?
+ max_vp_vert_height : SurfaceHeightLuma);
+ eff_surf_height_c = eff_surf_height_l / (1 + yuv420);
+
+ full_swath_bytes_horz_wc_l = eff_surf_width_l * RequestHeight256ByteLuma * BytePerPixelY;
+ full_swath_bytes_vert_wc_l = eff_surf_height_l * 256 / RequestHeight256ByteLuma;
+ if (BytePerPixelC > 0) {
+ full_swath_bytes_horz_wc_c = eff_surf_width_c * RequestHeight256ByteChroma
+ * BytePerPixelC;
+ full_swath_bytes_vert_wc_c = eff_surf_height_c * 256 / RequestHeight256ByteChroma;
+ } else {
+ full_swath_bytes_horz_wc_c = 0;
+ full_swath_bytes_vert_wc_c = 0;
+ }
+
+ if (SourcePixelFormat == dm_420_10) {
+ full_swath_bytes_horz_wc_l = dml_ceil(full_swath_bytes_horz_wc_l * 2 / 3, 256);
+ full_swath_bytes_horz_wc_c = dml_ceil(full_swath_bytes_horz_wc_c * 2 / 3, 256);
+ full_swath_bytes_vert_wc_l = dml_ceil(full_swath_bytes_vert_wc_l * 2 / 3, 256);
+ full_swath_bytes_vert_wc_c = dml_ceil(full_swath_bytes_vert_wc_c * 2 / 3, 256);
+ }
+
+ if (2 * full_swath_bytes_horz_wc_l + 2 * full_swath_bytes_horz_wc_c <= DETBufferSize) {
+ req128_horz_wc_l = 0;
+ req128_horz_wc_c = 0;
+ } else if (full_swath_bytes_horz_wc_l < 1.5 * full_swath_bytes_horz_wc_c
+ && 2 * full_swath_bytes_horz_wc_l + full_swath_bytes_horz_wc_c
+ <= DETBufferSize) {
+ req128_horz_wc_l = 0;
+ req128_horz_wc_c = 1;
+ } else if (full_swath_bytes_horz_wc_l >= 1.5 * full_swath_bytes_horz_wc_c
+ && full_swath_bytes_horz_wc_l + 2 * full_swath_bytes_horz_wc_c
+ <= DETBufferSize) {
+ req128_horz_wc_l = 1;
+ req128_horz_wc_c = 0;
+ } else {
+ req128_horz_wc_l = 1;
+ req128_horz_wc_c = 1;
+ }
+
+ if (2 * full_swath_bytes_vert_wc_l + 2 * full_swath_bytes_vert_wc_c <= DETBufferSize) {
+ req128_vert_wc_l = 0;
+ req128_vert_wc_c = 0;
+ } else if (full_swath_bytes_vert_wc_l < 1.5 * full_swath_bytes_vert_wc_c
+ && 2 * full_swath_bytes_vert_wc_l + full_swath_bytes_vert_wc_c
+ <= DETBufferSize) {
+ req128_vert_wc_l = 0;
+ req128_vert_wc_c = 1;
+ } else if (full_swath_bytes_vert_wc_l >= 1.5 * full_swath_bytes_vert_wc_c
+ && full_swath_bytes_vert_wc_l + 2 * full_swath_bytes_vert_wc_c
+ <= DETBufferSize) {
+ req128_vert_wc_l = 1;
+ req128_vert_wc_c = 0;
+ } else {
+ req128_vert_wc_l = 1;
+ req128_vert_wc_c = 1;
+ }
+
+ if (BytePerPixelY == 2 || (BytePerPixelY == 4 && TilingFormat != dm_sw_64kb_r_x)) {
+ segment_order_horz_contiguous_luma = 0;
+ } else {
+ segment_order_horz_contiguous_luma = 1;
+ }
+ if ((BytePerPixelY == 8
+ && (TilingFormat == dm_sw_64kb_d || TilingFormat == dm_sw_64kb_d_x
+ || TilingFormat == dm_sw_64kb_d_t
+ || TilingFormat == dm_sw_64kb_r_x))
+ || (BytePerPixelY == 4 && TilingFormat == dm_sw_64kb_r_x)) {
+ segment_order_vert_contiguous_luma = 0;
+ } else {
+ segment_order_vert_contiguous_luma = 1;
+ }
+ if (BytePerPixelC == 2 || (BytePerPixelC == 4 && TilingFormat != dm_sw_64kb_r_x)) {
+ segment_order_horz_contiguous_chroma = 0;
+ } else {
+ segment_order_horz_contiguous_chroma = 1;
+ }
+ if ((BytePerPixelC == 8
+ && (TilingFormat == dm_sw_64kb_d || TilingFormat == dm_sw_64kb_d_x
+ || TilingFormat == dm_sw_64kb_d_t
+ || TilingFormat == dm_sw_64kb_r_x))
+ || (BytePerPixelC == 4 && TilingFormat == dm_sw_64kb_r_x)) {
+ segment_order_vert_contiguous_chroma = 0;
+ } else {
+ segment_order_vert_contiguous_chroma = 1;
+ }
+
+ if (DCCProgrammingAssumesScanDirectionUnknown == true) {
+ if (req128_horz_wc_l == 0 && req128_vert_wc_l == 0) {
+ RequestLuma = REQ_256Bytes;
+ } else if ((req128_horz_wc_l == 1 && segment_order_horz_contiguous_luma == 0)
+ || (req128_vert_wc_l == 1 && segment_order_vert_contiguous_luma == 0)) {
+ RequestLuma = REQ_128BytesNonContiguous;
+ } else {
+ RequestLuma = REQ_128BytesContiguous;
+ }
+ if (req128_horz_wc_c == 0 && req128_vert_wc_c == 0) {
+ RequestChroma = REQ_256Bytes;
+ } else if ((req128_horz_wc_c == 1 && segment_order_horz_contiguous_chroma == 0)
+ || (req128_vert_wc_c == 1
+ && segment_order_vert_contiguous_chroma == 0)) {
+ RequestChroma = REQ_128BytesNonContiguous;
+ } else {
+ RequestChroma = REQ_128BytesContiguous;
+ }
+ } else if (ScanOrientation != dm_vert) {
+ if (req128_horz_wc_l == 0) {
+ RequestLuma = REQ_256Bytes;
+ } else if (segment_order_horz_contiguous_luma == 0) {
+ RequestLuma = REQ_128BytesNonContiguous;
+ } else {
+ RequestLuma = REQ_128BytesContiguous;
+ }
+ if (req128_horz_wc_c == 0) {
+ RequestChroma = REQ_256Bytes;
+ } else if (segment_order_horz_contiguous_chroma == 0) {
+ RequestChroma = REQ_128BytesNonContiguous;
+ } else {
+ RequestChroma = REQ_128BytesContiguous;
+ }
+ } else {
+ if (req128_vert_wc_l == 0) {
+ RequestLuma = REQ_256Bytes;
+ } else if (segment_order_vert_contiguous_luma == 0) {
+ RequestLuma = REQ_128BytesNonContiguous;
+ } else {
+ RequestLuma = REQ_128BytesContiguous;
+ }
+ if (req128_vert_wc_c == 0) {
+ RequestChroma = REQ_256Bytes;
+ } else if (segment_order_vert_contiguous_chroma == 0) {
+ RequestChroma = REQ_128BytesNonContiguous;
+ } else {
+ RequestChroma = REQ_128BytesContiguous;
+ }
+ }
+
+ if (RequestLuma == REQ_256Bytes) {
+ *MaxUncompressedBlockLuma = 256;
+ *MaxCompressedBlockLuma = 256;
+ *IndependentBlockLuma = 0;
+ } else if (RequestLuma == REQ_128BytesContiguous) {
+ *MaxUncompressedBlockLuma = 256;
+ *MaxCompressedBlockLuma = 128;
+ *IndependentBlockLuma = 128;
+ } else {
+ *MaxUncompressedBlockLuma = 256;
+ *MaxCompressedBlockLuma = 64;
+ *IndependentBlockLuma = 64;
+ }
+
+ if (RequestChroma == REQ_256Bytes) {
+ *MaxUncompressedBlockChroma = 256;
+ *MaxCompressedBlockChroma = 256;
+ *IndependentBlockChroma = 0;
+ } else if (RequestChroma == REQ_128BytesContiguous) {
+ *MaxUncompressedBlockChroma = 256;
+ *MaxCompressedBlockChroma = 128;
+ *IndependentBlockChroma = 128;
+ } else {
+ *MaxUncompressedBlockChroma = 256;
+ *MaxCompressedBlockChroma = 64;
+ *IndependentBlockChroma = 64;
+ }
+
+ if (DCCEnabled != true || BytePerPixelC == 0) {
+ *MaxUncompressedBlockChroma = 0;
+ *MaxCompressedBlockChroma = 0;
+ *IndependentBlockChroma = 0;
+ }
+
+ if (DCCEnabled != true) {
+ *MaxUncompressedBlockLuma = 0;
+ *MaxCompressedBlockLuma = 0;
+ *IndependentBlockLuma = 0;
+ }
+}
+
+
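+// Estimates how many source lines must be fetched ahead of scan-out for one
+// plane: the initial prefill depth (derived from VRatio, the vertical tap
+// count and interlacing) rounded out to whole swaths, plus the largest
+// partial swath that can remain.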
+static double CalculatePrefetchSourceLines(
+ struct display_mode_lib *mode_lib,
+ double VRatio,
+ double vtaps,
+ bool Interlace,
+ bool ProgressiveToInterlaceUnitInOPP,
+ unsigned int SwathHeight,
+ unsigned int ViewportYStart,
+ double *VInitPreFill,
+ unsigned int *MaxNumSwath)
+{
+ unsigned int MaxPartialSwath = 0;
+
+ if (ProgressiveToInterlaceUnitInOPP)
+ *VInitPreFill = dml_floor((VRatio + vtaps + 1) / 2.0, 1);
+ else
+ *VInitPreFill = dml_floor((VRatio + vtaps + 1 + Interlace * 0.5 * VRatio) / 2.0, 1);
+
+ if (!mode_lib->vba.IgnoreViewportPositioning) {
+
+ *MaxNumSwath = dml_ceil((*VInitPreFill - 1.0) / SwathHeight, 1) + 1.0;
+
+ if (*VInitPreFill > 1.0)
+ MaxPartialSwath = (unsigned int) (*VInitPreFill - 2) % SwathHeight;
+ else
+ MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 2)
+ % SwathHeight;
+ MaxPartialSwath = dml_max(1U, MaxPartialSwath);
+
+ } else {
+
+ if (ViewportYStart != 0)
+ dml_print(
+ "WARNING DML: using viewport y position of 0 even though actual viewport y position is non-zero in prefetch source lines calculation\n");
+
+ *MaxNumSwath = dml_ceil(*VInitPreFill / SwathHeight, 1);
+
+ if (*VInitPreFill > 1.0)
+ MaxPartialSwath = (unsigned int) (*VInitPreFill - 1) % SwathHeight;
+ else
+ MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 1)
+ % SwathHeight;
+ }
+
+ return *MaxNumSwath * SwathHeight + MaxPartialSwath;
+}
+
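+// Per-plane GPUVM and DCC-meta sizing: returns the PDE + meta-PTE bytes that
+// must be fetched per frame and fills in the meta row and PTE row dimensions,
+// the PTE request geometry, the vm/dpte group sizes, and whether one PTE row
+// still fits in the PTE request buffer.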
+static unsigned int CalculateVMAndRowBytes(
+ struct display_mode_lib *mode_lib,
+ bool DCCEnable,
+ unsigned int BlockHeight256Bytes,
+ unsigned int BlockWidth256Bytes,
+ enum source_format_class SourcePixelFormat,
+ unsigned int SurfaceTiling,
+ unsigned int BytePerPixel,
+ enum scan_direction_class ScanDirection,
+ unsigned int SwathWidth,
+ unsigned int ViewportHeight,
+ bool GPUVMEnable,
+ bool HostVMEnable,
+ unsigned int HostVMMaxNonCachedPageTableLevels,
+ unsigned int GPUVMMinPageSize,
+ unsigned int HostVMMinPageSize,
+ unsigned int PTEBufferSizeInRequests,
+ unsigned int Pitch,
+ unsigned int DCCMetaPitch,
+ unsigned int *MacroTileWidth,
+ unsigned int *MetaRowByte,
+ unsigned int *PixelPTEBytesPerRow,
+ bool *PTEBufferSizeNotExceeded,
+ unsigned int *dpte_row_width_ub,
+ unsigned int *dpte_row_height,
+ unsigned int *MetaRequestWidth,
+ unsigned int *MetaRequestHeight,
+ unsigned int *meta_row_width,
+ unsigned int *meta_row_height,
+ unsigned int *vm_group_bytes,
+ unsigned int *dpte_group_bytes,
+ unsigned int *PixelPTEReqWidth,
+ unsigned int *PixelPTEReqHeight,
+ unsigned int *PTERequestSize,
+ unsigned int *DPDE0BytesFrame,
+ unsigned int *MetaPTEBytesFrame)
+{
+ unsigned int MPDEBytesFrame = 0;
+ unsigned int DCCMetaSurfaceBytes = 0;
+ unsigned int MacroTileSizeBytes = 0;
+ unsigned int MacroTileHeight = 0;
+ unsigned int ExtraDPDEBytesFrame = 0;
+ unsigned int PDEAndMetaPTEBytesFrame = 0;
+ unsigned int PixelPTEReqHeightPTEs = 0;
+ unsigned int HostVMDynamicLevels = 0;
+
+ double FractionOfPTEReturnDrop;
+
+ if (GPUVMEnable == true && HostVMEnable == true) {
+ if (HostVMMinPageSize < 2048) {
+ HostVMDynamicLevels = HostVMMaxNonCachedPageTableLevels;
+ } else if (HostVMMinPageSize >= 2048 && HostVMMinPageSize < 1048576) {
+ HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 1);
+ } else {
+ HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 2);
+ }
+ }
+
+ *MetaRequestHeight = 8 * BlockHeight256Bytes;
+ *MetaRequestWidth = 8 * BlockWidth256Bytes;
+ if (ScanDirection != dm_vert) {
+ *meta_row_height = *MetaRequestHeight;
+ *meta_row_width = dml_ceil((double) SwathWidth - 1, *MetaRequestWidth)
+ + *MetaRequestWidth;
+ *MetaRowByte = *meta_row_width * *MetaRequestHeight * BytePerPixel / 256.0;
+ } else {
+ *meta_row_height = *MetaRequestWidth;
+ *meta_row_width = dml_ceil((double) SwathWidth - 1, *MetaRequestHeight)
+ + *MetaRequestHeight;
+ *MetaRowByte = *meta_row_width * *MetaRequestWidth * BytePerPixel / 256.0;
+ }
+ DCCMetaSurfaceBytes = DCCMetaPitch * (dml_ceil(ViewportHeight - 1, 64 * BlockHeight256Bytes)
+ + 64 * BlockHeight256Bytes) * BytePerPixel / 256;
+ if (GPUVMEnable == true) {
+ *MetaPTEBytesFrame = (dml_ceil((double) (DCCMetaSurfaceBytes - 4.0 * 1024.0) / (8 * 4.0 * 1024), 1) + 1) * 64;
+ MPDEBytesFrame = 128 * (mode_lib->vba.GPUVMMaxPageTableLevels - 1);
+ } else {
+ *MetaPTEBytesFrame = 0;
+ MPDEBytesFrame = 0;
+ }
+
+ if (DCCEnable != true) {
+ *MetaPTEBytesFrame = 0;
+ MPDEBytesFrame = 0;
+ *MetaRowByte = 0;
+ }
+
+ if (SurfaceTiling == dm_sw_linear) {
+ MacroTileSizeBytes = 256;
+ MacroTileHeight = BlockHeight256Bytes;
+ } else {
+ MacroTileSizeBytes = 65536;
+ MacroTileHeight = 16 * BlockHeight256Bytes;
+ }
+ *MacroTileWidth = MacroTileSizeBytes / BytePerPixel / MacroTileHeight;
+
+ if (GPUVMEnable == true && mode_lib->vba.GPUVMMaxPageTableLevels > 1) {
+ if (ScanDirection != dm_vert) {
+ *DPDE0BytesFrame = 64 * (dml_ceil(((Pitch * (dml_ceil(ViewportHeight - 1, MacroTileHeight) + MacroTileHeight) * BytePerPixel) - MacroTileSizeBytes) / (8 * 2097152), 1) + 1);
+ } else {
+ *DPDE0BytesFrame = 64 * (dml_ceil(((Pitch * (dml_ceil((double) SwathWidth - 1, MacroTileHeight) + MacroTileHeight) * BytePerPixel) - MacroTileSizeBytes) / (8 * 2097152), 1) + 1);
+ }
+ ExtraDPDEBytesFrame = 128 * (mode_lib->vba.GPUVMMaxPageTableLevels - 2);
+ } else {
+ *DPDE0BytesFrame = 0;
+ ExtraDPDEBytesFrame = 0;
+ }
+
+ PDEAndMetaPTEBytesFrame = *MetaPTEBytesFrame + MPDEBytesFrame + *DPDE0BytesFrame
+ + ExtraDPDEBytesFrame;
+
+ if (HostVMEnable == true) {
+ PDEAndMetaPTEBytesFrame = PDEAndMetaPTEBytesFrame * (1 + 8 * HostVMDynamicLevels);
+ }
+
+ if (SurfaceTiling == dm_sw_linear) {
+ PixelPTEReqHeightPTEs = 1;
+ *PixelPTEReqHeight = 1;
+ *PixelPTEReqWidth = 32768.0 / BytePerPixel;
+ *PTERequestSize = 64;
+ FractionOfPTEReturnDrop = 0;
+ } else if (MacroTileSizeBytes == 4096) {
+ PixelPTEReqHeightPTEs = 1;
+ *PixelPTEReqHeight = MacroTileHeight;
+ *PixelPTEReqWidth = 8 * *MacroTileWidth;
+ *PTERequestSize = 64;
+ if (ScanDirection != dm_vert)
+ FractionOfPTEReturnDrop = 0;
+ else
+ FractionOfPTEReturnDrop = 7.0 / 8.0;
+ } else if (GPUVMMinPageSize == 4 && MacroTileSizeBytes > 4096) {
+ PixelPTEReqHeightPTEs = 16;
+ *PixelPTEReqHeight = 16 * BlockHeight256Bytes;
+ *PixelPTEReqWidth = 16 * BlockWidth256Bytes;
+ *PTERequestSize = 128;
+ FractionOfPTEReturnDrop = 0;
+ } else {
+ PixelPTEReqHeightPTEs = 1;
+ *PixelPTEReqHeight = MacroTileHeight;
+ *PixelPTEReqWidth = 8 * *MacroTileWidth;
+ *PTERequestSize = 64;
+ FractionOfPTEReturnDrop = 0;
+ }
+
+ if (SurfaceTiling == dm_sw_linear) {
+ *dpte_row_height = dml_min(128, 1 << (unsigned int) dml_floor(dml_log2(PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch), 1));
+ *dpte_row_width_ub = (dml_ceil(((double) SwathWidth - 1) / *PixelPTEReqWidth, 1) + 1) * *PixelPTEReqWidth;
+ *PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqWidth * *PTERequestSize;
+ } else if (ScanDirection != dm_vert) {
+ *dpte_row_height = *PixelPTEReqHeight;
+ *dpte_row_width_ub = (dml_ceil((double) (SwathWidth - 1) / *PixelPTEReqWidth, 1) + 1) * *PixelPTEReqWidth;
+ *PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqWidth * *PTERequestSize;
+ } else {
+ *dpte_row_height = dml_min(*PixelPTEReqWidth, *MacroTileWidth);
+ *dpte_row_width_ub = (dml_ceil((double) (SwathWidth - 1) / *PixelPTEReqHeight, 1) + 1) * *PixelPTEReqHeight;
+ *PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqHeight * *PTERequestSize;
+ }
+ if (*PixelPTEBytesPerRow * (1 - FractionOfPTEReturnDrop)
+ <= 64 * PTEBufferSizeInRequests) {
+ *PTEBufferSizeNotExceeded = true;
+ } else {
+ *PTEBufferSizeNotExceeded = false;
+ }
+
+ if (GPUVMEnable != true) {
+ *PixelPTEBytesPerRow = 0;
+ *PTEBufferSizeNotExceeded = true;
+ }
+ dml_print("DML: vm_bytes = meta_pte_bytes_per_frame (per_pipe) = MetaPTEBytesFrame = : %i\n", *MetaPTEBytesFrame);
+
+ if (HostVMEnable == true) {
+ *PixelPTEBytesPerRow = *PixelPTEBytesPerRow * (1 + 8 * HostVMDynamicLevels);
+ }
+
+ if (HostVMEnable == true) {
+ *vm_group_bytes = 512;
+ *dpte_group_bytes = 512;
+ } else if (GPUVMEnable == true) {
+ *vm_group_bytes = 2048;
+ if (SurfaceTiling != dm_sw_linear && PixelPTEReqHeightPTEs == 1 && ScanDirection == dm_vert) {
+ *dpte_group_bytes = 512;
+ } else {
+ *dpte_group_bytes = 2048;
+ }
+ } else {
+ *vm_group_bytes = 0;
+ *dpte_group_bytes = 0;
+ }
+
+ return PDEAndMetaPTEBytesFrame;
+}
+
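+// Single-state pass run after the voltage level has been chosen: recomputes
+// ReturnBW, DISPCLK/DPPCLK, DCFCLK deep sleep, DSCCLK and the DSC delay, and
+// then the per-plane VM/row and prefetch parameters that feed the watermark
+// and performance calculation.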
+static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
+ struct display_mode_lib *mode_lib)
+{
+ struct vba_vars_st *v = &mode_lib->vba;
+ unsigned int j, k;
+ long ReorderBytes = 0;
+ unsigned int PrefetchMode = v->PrefetchModePerState[v->VoltageLevel][v->maxMpcComb];
+ double MaxTotalRDBandwidth = 0;
+ double MaxTotalRDBandwidthNoUrgentBurst = 0;
+ bool DestinationLineTimesForPrefetchLessThan2 = false;
+ bool VRatioPrefetchMoreThan4 = false;
+ double TWait;
+
+ v->WritebackDISPCLK = 0.0;
+ v->DISPCLKWithRamping = 0;
+ v->DISPCLKWithoutRamping = 0;
+ v->GlobalDPPCLK = 0.0;
+ /* DAL custom code: need to update ReturnBW in case min dcfclk is overridden */
+ v->IdealSDPPortBandwidthPerState[v->VoltageLevel][v->maxMpcComb] = dml_min3(
+ v->ReturnBusWidth * v->DCFCLK,
+ v->DRAMSpeedPerState[v->VoltageLevel] * v->NumberOfChannels * v->DRAMChannelWidth,
+ v->FabricClockPerState[v->VoltageLevel] * v->FabricDatapathToDCNDataReturn);
+ if (v->HostVMEnable != true) {
+ v->ReturnBW = v->IdealSDPPortBandwidthPerState[v->VoltageLevel][v->maxMpcComb] * v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100;
+ } else {
+ v->ReturnBW = v->IdealSDPPortBandwidthPerState[v->VoltageLevel][v->maxMpcComb] * v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / 100;
+ }
+ /* End DAL custom code */
+
+ // DISPCLK and DPPCLK Calculation
+ //
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->WritebackEnable[k]) {
+ v->WritebackDISPCLK = dml_max(v->WritebackDISPCLK,
+ dml30_CalculateWriteBackDISPCLK(
+ v->WritebackPixelFormat[k],
+ v->PixelClock[k],
+ v->WritebackHRatio[k],
+ v->WritebackVRatio[k],
+ v->WritebackHTaps[k],
+ v->WritebackVTaps[k],
+ v->WritebackSourceWidth[k],
+ v->WritebackDestinationWidth[k],
+ v->HTotal[k],
+ v->WritebackLineBufferSize));
+ }
+ }
+
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->HRatio[k] > 1) {
+ v->PSCL_THROUGHPUT_LUMA[k] = dml_min(v->MaxDCHUBToPSCLThroughput,
+ v->MaxPSCLToLBThroughput * v->HRatio[k] / dml_ceil(v->htaps[k] / 6.0, 1));
+ } else {
+ v->PSCL_THROUGHPUT_LUMA[k] = dml_min(
+ v->MaxDCHUBToPSCLThroughput,
+ v->MaxPSCLToLBThroughput);
+ }
+
+ v->DPPCLKUsingSingleDPPLuma = v->PixelClock[k]
+ * dml_max(v->vtaps[k] / 6.0 * dml_min(1.0, v->HRatio[k]),
+ dml_max(v->HRatio[k] * v->VRatio[k] / v->PSCL_THROUGHPUT_LUMA[k], 1.0));
+
+ if ((v->htaps[k] > 6 || v->vtaps[k] > 6)
+ && v->DPPCLKUsingSingleDPPLuma < 2 * v->PixelClock[k]) {
+ v->DPPCLKUsingSingleDPPLuma = 2 * v->PixelClock[k];
+ }
+
+ if ((v->SourcePixelFormat[k] != dm_420_8
+ && v->SourcePixelFormat[k] != dm_420_10
+ && v->SourcePixelFormat[k] != dm_420_12
+ && v->SourcePixelFormat[k] != dm_rgbe_alpha)) {
+ v->PSCL_THROUGHPUT_CHROMA[k] = 0.0;
+ v->DPPCLKUsingSingleDPP[k] = v->DPPCLKUsingSingleDPPLuma;
+ } else {
+ if (v->HRatioChroma[k] > 1) {
+ v->PSCL_THROUGHPUT_CHROMA[k] = dml_min(v->MaxDCHUBToPSCLThroughput,
+ v->MaxPSCLToLBThroughput * v->HRatioChroma[k] / dml_ceil(v->HTAPsChroma[k] / 6.0, 1.0));
+ } else {
+ v->PSCL_THROUGHPUT_CHROMA[k] = dml_min(
+ v->MaxDCHUBToPSCLThroughput,
+ v->MaxPSCLToLBThroughput);
+ }
+ v->DPPCLKUsingSingleDPPChroma = v->PixelClock[k]
+ * dml_max3(v->VTAPsChroma[k] / 6.0 * dml_min(1.0, v->HRatioChroma[k]),
+ v->HRatioChroma[k] * v->VRatioChroma[k] / v->PSCL_THROUGHPUT_CHROMA[k], 1.0);
+
+ if ((v->HTAPsChroma[k] > 6 || v->VTAPsChroma[k] > 6)
+ && v->DPPCLKUsingSingleDPPChroma
+ < 2 * v->PixelClock[k]) {
+ v->DPPCLKUsingSingleDPPChroma = 2
+ * v->PixelClock[k];
+ }
+
+ v->DPPCLKUsingSingleDPP[k] = dml_max(
+ v->DPPCLKUsingSingleDPPLuma,
+ v->DPPCLKUsingSingleDPPChroma);
+ }
+ }
+
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->BlendingAndTiming[k] != k)
+ continue;
+ if (v->ODMCombineEnabled[k] == dm_odm_combine_mode_4to1) {
+ v->DISPCLKWithRamping = dml_max(v->DISPCLKWithRamping,
+ v->PixelClock[k] / 4 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100)
+ * (1 + v->DISPCLKRampingMargin / 100));
+ v->DISPCLKWithoutRamping = dml_max(v->DISPCLKWithoutRamping,
+ v->PixelClock[k] / 4 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100));
+ } else if (v->ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
+ v->DISPCLKWithRamping = dml_max(v->DISPCLKWithRamping,
+ v->PixelClock[k] / 2 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100)
+ * (1 + v->DISPCLKRampingMargin / 100));
+ v->DISPCLKWithoutRamping = dml_max(v->DISPCLKWithoutRamping,
+ v->PixelClock[k] / 2 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100));
+ } else {
+ v->DISPCLKWithRamping = dml_max(v->DISPCLKWithRamping,
+ v->PixelClock[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100)
+ * (1 + v->DISPCLKRampingMargin / 100));
+ v->DISPCLKWithoutRamping = dml_max(v->DISPCLKWithoutRamping,
+ v->PixelClock[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100));
+ }
+ }
+
+ v->DISPCLKWithRamping = dml_max(
+ v->DISPCLKWithRamping,
+ v->WritebackDISPCLK);
+ v->DISPCLKWithoutRamping = dml_max(
+ v->DISPCLKWithoutRamping,
+ v->WritebackDISPCLK);
+
+ ASSERT(v->DISPCLKDPPCLKVCOSpeed != 0);
+ v->DISPCLKWithRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(
+ v->DISPCLKWithRamping,
+ v->DISPCLKDPPCLKVCOSpeed);
+ v->DISPCLKWithoutRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(
+ v->DISPCLKWithoutRamping,
+ v->DISPCLKDPPCLKVCOSpeed);
+ v->MaxDispclkRoundedToDFSGranularity = RoundToDFSGranularityDown(
+ v->soc.clock_limits[mode_lib->soc.num_states].dispclk_mhz,
+ v->DISPCLKDPPCLKVCOSpeed);
+ if (v->DISPCLKWithoutRampingRoundedToDFSGranularity
+ > v->MaxDispclkRoundedToDFSGranularity) {
+ v->DISPCLK_calculated =
+ v->DISPCLKWithoutRampingRoundedToDFSGranularity;
+ } else if (v->DISPCLKWithRampingRoundedToDFSGranularity
+ > v->MaxDispclkRoundedToDFSGranularity) {
+ v->DISPCLK_calculated = v->MaxDispclkRoundedToDFSGranularity;
+ } else {
+ v->DISPCLK_calculated =
+ v->DISPCLKWithRampingRoundedToDFSGranularity;
+ }
+ v->DISPCLK = v->DISPCLK_calculated;
+ DTRACE(" dispclk_mhz (calculated) = %f", v->DISPCLK_calculated);
+
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ v->DPPCLK_calculated[k] = v->DPPCLKUsingSingleDPP[k]
+ / v->DPPPerPlane[k]
+ * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100);
+ v->GlobalDPPCLK = dml_max(
+ v->GlobalDPPCLK,
+ v->DPPCLK_calculated[k]);
+ }
+ v->GlobalDPPCLK = RoundToDFSGranularityUp(
+ v->GlobalDPPCLK,
+ v->DISPCLKDPPCLKVCOSpeed);
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ v->DPPCLK_calculated[k] = v->GlobalDPPCLK / 255
+ * dml_ceil(
+ v->DPPCLK_calculated[k] * 255.0
+ / v->GlobalDPPCLK,
+ 1);
+ DTRACE(" dppclk_mhz[%i] (calculated) = %f", k, v->DPPCLK_calculated[k]);
+ v->DPPCLK[k] = v->DPPCLK_calculated[k];
+ }
+
+ // Urgent and B P-State/DRAM Clock Change Watermark
+ DTRACE(" dcfclk_mhz = %f", v->DCFCLK);
+ DTRACE(" return_bus_bw = %f", v->ReturnBW);
+
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ CalculateBytePerPixelAnd256BBlockSizes(
+ v->SourcePixelFormat[k],
+ v->SurfaceTiling[k],
+ &v->BytePerPixelY[k],
+ &v->BytePerPixelC[k],
+ &v->BytePerPixelDETY[k],
+ &v->BytePerPixelDETC[k],
+ &v->BlockHeight256BytesY[k],
+ &v->BlockHeight256BytesC[k],
+ &v->BlockWidth256BytesY[k],
+ &v->BlockWidth256BytesC[k]);
+ }
+
+ CalculateSwathWidth(
+ false,
+ v->NumberOfActivePlanes,
+ v->SourcePixelFormat,
+ v->SourceScan,
+ v->ViewportWidth,
+ v->ViewportHeight,
+ v->SurfaceWidthY,
+ v->SurfaceWidthC,
+ v->SurfaceHeightY,
+ v->SurfaceHeightC,
+ v->ODMCombineEnabled,
+ v->BytePerPixelY,
+ v->BytePerPixelC,
+ v->BlockHeight256BytesY,
+ v->BlockHeight256BytesC,
+ v->BlockWidth256BytesY,
+ v->BlockWidth256BytesC,
+ v->BlendingAndTiming,
+ v->HActive,
+ v->HRatio,
+ v->DPPPerPlane,
+ v->SwathWidthSingleDPPY,
+ v->SwathWidthSingleDPPC,
+ v->SwathWidthY,
+ v->SwathWidthC,
+ v->dummyinteger3,
+ v->dummyinteger4,
+ v->swath_width_luma_ub,
+ v->swath_width_chroma_ub);
+
+
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ v->ReadBandwidthPlaneLuma[k] = v->SwathWidthSingleDPPY[k] * v->BytePerPixelY[k] / (v->HTotal[k] / v->PixelClock[k]) * v->VRatio[k];
+ v->ReadBandwidthPlaneChroma[k] = v->SwathWidthSingleDPPC[k] * v->BytePerPixelC[k] / (v->HTotal[k] / v->PixelClock[k]) * v->VRatioChroma[k];
+ DTRACE("read_bw[%i] = %fBps", k, v->ReadBandwidthPlaneLuma[k] + v->ReadBandwidthPlaneChroma[k]);
+ }
+
+
+ // DCFCLK Deep Sleep
+ CalculateDCFCLKDeepSleep(
+ mode_lib,
+ v->NumberOfActivePlanes,
+ v->BytePerPixelY,
+ v->BytePerPixelC,
+ v->VRatio,
+ v->VRatioChroma,
+ v->SwathWidthY,
+ v->SwathWidthC,
+ v->DPPPerPlane,
+ v->HRatio,
+ v->HRatioChroma,
+ v->PixelClock,
+ v->PSCL_THROUGHPUT_LUMA,
+ v->PSCL_THROUGHPUT_CHROMA,
+ v->DPPCLK,
+ v->ReadBandwidthPlaneLuma,
+ v->ReadBandwidthPlaneChroma,
+ v->ReturnBusWidth,
+ &v->DCFCLKDeepSleep);
+
+ // DSCCLK
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if ((v->BlendingAndTiming[k] != k) || !v->DSCEnabled[k]) {
+ v->DSCCLK_calculated[k] = 0.0;
+ } else {
+ if (v->OutputFormat[k] == dm_420)
+ v->DSCFormatFactor = 2;
+ else if (v->OutputFormat[k] == dm_444)
+ v->DSCFormatFactor = 1;
+ else if (v->OutputFormat[k] == dm_n422)
+ v->DSCFormatFactor = 2;
+ else
+ v->DSCFormatFactor = 1;
+ if (v->ODMCombineEnabled[k] == dm_odm_combine_mode_4to1)
+ v->DSCCLK_calculated[k] = v->PixelClockBackEnd[k] / 12
+ / v->DSCFormatFactor / (1 - v->DISPCLKDPPCLKDSCCLKDownSpreading / 100);
+ else if (v->ODMCombineEnabled[k] == dm_odm_combine_mode_2to1)
+ v->DSCCLK_calculated[k] = v->PixelClockBackEnd[k] / 6
+ / v->DSCFormatFactor / (1 - v->DISPCLKDPPCLKDSCCLKDownSpreading / 100);
+ else
+ v->DSCCLK_calculated[k] = v->PixelClockBackEnd[k] / 3
+ / v->DSCFormatFactor / (1 - v->DISPCLKDPPCLKDSCCLKDownSpreading / 100);
+ }
+ }
+
+ // DSC Delay
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ double BPP = v->OutputBppPerState[k][v->VoltageLevel];
+
+ if (v->DSCEnabled[k] && BPP != 0) {
+ if (v->ODMCombineEnabled[k] == dm_odm_combine_mode_disabled) {
+ v->DSCDelay[k] = dscceComputeDelay(v->DSCInputBitPerComponent[k],
+ BPP,
+ dml_ceil((double) v->HActive[k] / v->NumberOfDSCSlices[k], 1),
+ v->NumberOfDSCSlices[k],
+ v->OutputFormat[k],
+ v->Output[k])
+ + dscComputeDelay(v->OutputFormat[k], v->Output[k]);
+ } else if (v->ODMCombineEnabled[k] == dm_odm_combine_mode_2to1) {
+ v->DSCDelay[k] = 2 * dscceComputeDelay(v->DSCInputBitPerComponent[k],
+ BPP,
+ dml_ceil((double) v->HActive[k] / v->NumberOfDSCSlices[k], 1),
+ v->NumberOfDSCSlices[k] / 2.0,
+ v->OutputFormat[k],
+ v->Output[k])
+ + dscComputeDelay(v->OutputFormat[k], v->Output[k]);
+ } else {
+ v->DSCDelay[k] = 4 * dscceComputeDelay(v->DSCInputBitPerComponent[k],
+ BPP,
+ dml_ceil((double) v->HActive[k] / v->NumberOfDSCSlices[k], 1),
+ v->NumberOfDSCSlices[k] / 4.0,
+ v->OutputFormat[k],
+ v->Output[k])
+ + dscComputeDelay(v->OutputFormat[k], v->Output[k]);
+ }
+ v->DSCDelay[k] = v->DSCDelay[k] * v->PixelClock[k] / v->PixelClockBackEnd[k];
+ } else {
+ v->DSCDelay[k] = 0;
+ }
+ }
+
+ for (k = 0; k < v->NumberOfActivePlanes; ++k)
+ for (j = 0; j < v->NumberOfActivePlanes; ++j) // NumberOfPlanes
+ if (j != k && v->BlendingAndTiming[k] == j
+ && v->DSCEnabled[j])
+ v->DSCDelay[k] = v->DSCDelay[j];
+
+ // Prefetch
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ unsigned int PDEAndMetaPTEBytesFrameY = 0;
+ unsigned int PixelPTEBytesPerRowY = 0;
+ unsigned int MetaRowByteY = 0;
+ unsigned int MetaRowByteC = 0;
+ unsigned int PDEAndMetaPTEBytesFrameC = 0;
+ unsigned int PixelPTEBytesPerRowC = 0;
+ bool PTEBufferSizeNotExceededY = 0;
+ bool PTEBufferSizeNotExceededC = 0;
+
+
+ if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_420_12 || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
+ if ((v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_420_12) && v->SourceScan[k] != dm_vert) {
+ v->PTEBufferSizeInRequestsForLuma = (v->PTEBufferSizeInRequestsLuma + v->PTEBufferSizeInRequestsChroma) / 2;
+ v->PTEBufferSizeInRequestsForChroma = v->PTEBufferSizeInRequestsForLuma;
+ } else {
+ v->PTEBufferSizeInRequestsForLuma = v->PTEBufferSizeInRequestsLuma;
+ v->PTEBufferSizeInRequestsForChroma = v->PTEBufferSizeInRequestsChroma;
+
+ }
+ PDEAndMetaPTEBytesFrameC = CalculateVMAndRowBytes(
+ mode_lib,
+ v->DCCEnable[k],
+ v->BlockHeight256BytesC[k],
+ v->BlockWidth256BytesC[k],
+ v->SourcePixelFormat[k],
+ v->SurfaceTiling[k],
+ v->BytePerPixelC[k],
+ v->SourceScan[k],
+ v->SwathWidthC[k],
+ v->ViewportHeightChroma[k],
+ v->GPUVMEnable,
+ v->HostVMEnable,
+ v->HostVMMaxNonCachedPageTableLevels,
+ v->GPUVMMinPageSize,
+ v->HostVMMinPageSize,
+ v->PTEBufferSizeInRequestsForChroma,
+ v->PitchC[k],
+ v->DCCMetaPitchC[k],
+ &v->MacroTileWidthC[k],
+ &MetaRowByteC,
+ &PixelPTEBytesPerRowC,
+ &PTEBufferSizeNotExceededC,
+ &v->dpte_row_width_chroma_ub[k],
+ &v->dpte_row_height_chroma[k],
+ &v->meta_req_width_chroma[k],
+ &v->meta_req_height_chroma[k],
+ &v->meta_row_width_chroma[k],
+ &v->meta_row_height_chroma[k],
+ &v->dummyinteger1,
+ &v->dummyinteger2,
+ &v->PixelPTEReqWidthC[k],
+ &v->PixelPTEReqHeightC[k],
+ &v->PTERequestSizeC[k],
+ &v->dpde0_bytes_per_frame_ub_c[k],
+ &v->meta_pte_bytes_per_frame_ub_c[k]);
+
+ v->PrefetchSourceLinesC[k] = CalculatePrefetchSourceLines(
+ mode_lib,
+ v->VRatioChroma[k],
+ v->VTAPsChroma[k],
+ v->Interlace[k],
+ v->ProgressiveToInterlaceUnitInOPP,
+ v->SwathHeightC[k],
+ v->ViewportYStartC[k],
+ &v->VInitPreFillC[k],
+ &v->MaxNumSwathC[k]);
+ } else {
+ v->PTEBufferSizeInRequestsForLuma = v->PTEBufferSizeInRequestsLuma + v->PTEBufferSizeInRequestsChroma;
+ v->PTEBufferSizeInRequestsForChroma = 0;
+ PixelPTEBytesPerRowC = 0;
+ PDEAndMetaPTEBytesFrameC = 0;
+ MetaRowByteC = 0;
+ v->MaxNumSwathC[k] = 0;
+ v->PrefetchSourceLinesC[k] = 0;
+ }
+
+ PDEAndMetaPTEBytesFrameY = CalculateVMAndRowBytes(
+ mode_lib,
+ v->DCCEnable[k],
+ v->BlockHeight256BytesY[k],
+ v->BlockWidth256BytesY[k],
+ v->SourcePixelFormat[k],
+ v->SurfaceTiling[k],
+ v->BytePerPixelY[k],
+ v->SourceScan[k],
+ v->SwathWidthY[k],
+ v->ViewportHeight[k],
+ v->GPUVMEnable,
+ v->HostVMEnable,
+ v->HostVMMaxNonCachedPageTableLevels,
+ v->GPUVMMinPageSize,
+ v->HostVMMinPageSize,
+ v->PTEBufferSizeInRequestsForLuma,
+ v->PitchY[k],
+ v->DCCMetaPitchY[k],
+ &v->MacroTileWidthY[k],
+ &MetaRowByteY,
+ &PixelPTEBytesPerRowY,
+ &PTEBufferSizeNotExceededY,
+ &v->dpte_row_width_luma_ub[k],
+ &v->dpte_row_height[k],
+ &v->meta_req_width[k],
+ &v->meta_req_height[k],
+ &v->meta_row_width[k],
+ &v->meta_row_height[k],
+ &v->vm_group_bytes[k],
+ &v->dpte_group_bytes[k],
+ &v->PixelPTEReqWidthY[k],
+ &v->PixelPTEReqHeightY[k],
+ &v->PTERequestSizeY[k],
+ &v->dpde0_bytes_per_frame_ub_l[k],
+ &v->meta_pte_bytes_per_frame_ub_l[k]);
+
+ v->PrefetchSourceLinesY[k] = CalculatePrefetchSourceLines(
+ mode_lib,
+ v->VRatio[k],
+ v->vtaps[k],
+ v->Interlace[k],
+ v->ProgressiveToInterlaceUnitInOPP,
+ v->SwathHeightY[k],
+ v->ViewportYStartY[k],
+ &v->VInitPreFillY[k],
+ &v->MaxNumSwathY[k]);
+ v->PixelPTEBytesPerRow[k] = PixelPTEBytesPerRowY + PixelPTEBytesPerRowC;
+ v->PDEAndMetaPTEBytesFrame[k] = PDEAndMetaPTEBytesFrameY
+ + PDEAndMetaPTEBytesFrameC;
+ v->MetaRowByte[k] = MetaRowByteY + MetaRowByteC;
+
+ CalculateRowBandwidth(
+ v->GPUVMEnable,
+ v->SourcePixelFormat[k],
+ v->VRatio[k],
+ v->VRatioChroma[k],
+ v->DCCEnable[k],
+ v->HTotal[k] / v->PixelClock[k],
+ MetaRowByteY,
+ MetaRowByteC,
+ v->meta_row_height[k],
+ v->meta_row_height_chroma[k],
+ PixelPTEBytesPerRowY,
+ PixelPTEBytesPerRowC,
+ v->dpte_row_height[k],
+ v->dpte_row_height_chroma[k],
+ &v->meta_row_bw[k],
+ &v->dpte_row_bw[k]);
+ }
+
+ v->TotalDCCActiveDPP = 0;
+ v->TotalActiveDPP = 0;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ v->TotalActiveDPP = v->TotalActiveDPP
+ + v->DPPPerPlane[k];
+ if (v->DCCEnable[k])
+ v->TotalDCCActiveDPP = v->TotalDCCActiveDPP
+ + v->DPPPerPlane[k];
+ }
+
+
+ ReorderBytes = v->NumberOfChannels * dml_max3(
+ v->UrgentOutOfOrderReturnPerChannelPixelDataOnly,
+ v->UrgentOutOfOrderReturnPerChannelPixelMixedWithVMData,
+ v->UrgentOutOfOrderReturnPerChannelVMDataOnly);
+
+ v->UrgentExtraLatency = CalculateExtraLatency(
+ v->RoundTripPingLatencyCycles,
+ ReorderBytes,
+ v->DCFCLK,
+ v->TotalActiveDPP,
+ v->PixelChunkSizeInKByte,
+ v->TotalDCCActiveDPP,
+ v->MetaChunkSize,
+ v->ReturnBW,
+ v->GPUVMEnable,
+ v->HostVMEnable,
+ v->NumberOfActivePlanes,
+ v->DPPPerPlane,
+ v->dpte_group_bytes,
+ v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ v->HostVMMinPageSize,
+ v->HostVMMaxNonCachedPageTableLevels);
+
+ v->TCalc = 24.0 / v->DCFCLKDeepSleep;
+
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->BlendingAndTiming[k] == k) {
+ if (v->WritebackEnable[k] == true) {
+ v->WritebackDelay[v->VoltageLevel][k] = v->WritebackLatency +
+ CalculateWriteBackDelay(v->WritebackPixelFormat[k],
+ v->WritebackHRatio[k],
+ v->WritebackVRatio[k],
+ v->WritebackVTaps[k],
+ v->WritebackDestinationWidth[k],
+ v->WritebackDestinationHeight[k],
+ v->WritebackSourceHeight[k],
+ v->HTotal[k]) / v->DISPCLK;
+ } else
+ v->WritebackDelay[v->VoltageLevel][k] = 0;
+ for (j = 0; j < v->NumberOfActivePlanes; ++j) {
+ if (v->BlendingAndTiming[j] == k
+ && v->WritebackEnable[j] == true) {
+ v->WritebackDelay[v->VoltageLevel][k] = dml_max(v->WritebackDelay[v->VoltageLevel][k],
+ v->WritebackLatency + CalculateWriteBackDelay(
+ v->WritebackPixelFormat[j],
+ v->WritebackHRatio[j],
+ v->WritebackVRatio[j],
+ v->WritebackVTaps[j],
+ v->WritebackDestinationWidth[j],
+ v->WritebackDestinationHeight[j],
+ v->WritebackSourceHeight[j],
+ v->HTotal[k]) / v->DISPCLK);
+ }
+ }
+ }
+ }
+
+ for (k = 0; k < v->NumberOfActivePlanes; ++k)
+ for (j = 0; j < v->NumberOfActivePlanes; ++j)
+ if (v->BlendingAndTiming[k] == j)
+ v->WritebackDelay[v->VoltageLevel][k] = v->WritebackDelay[v->VoltageLevel][j];
+
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ v->MaxVStartupLines[k] = v->VTotal[k] - v->VActive[k] - dml_max(1.0, dml_ceil((double) v->WritebackDelay[v->VoltageLevel][k] / (v->HTotal[k] / v->PixelClock[k]), 1));
+ }
+
+ v->MaximumMaxVStartupLines = 0;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k)
+ v->MaximumMaxVStartupLines = dml_max(v->MaximumMaxVStartupLines, v->MaxVStartupLines[k]);
+
+ if (v->DRAMClockChangeLatencyOverride > 0.0) {
+ v->FinalDRAMClockChangeLatency = v->DRAMClockChangeLatencyOverride;
+ } else {
+ v->FinalDRAMClockChangeLatency = v->DRAMClockChangeLatency;
+ }
+ v->UrgentLatency = CalculateUrgentLatency(v->UrgentLatencyPixelDataOnly, v->UrgentLatencyPixelMixedWithVMData, v->UrgentLatencyVMDataOnly, v->DoUrgentLatencyAdjustment, v->UrgentLatencyAdjustmentFabricClockComponent, v->UrgentLatencyAdjustmentFabricClockReference, v->FabricClock);
+
+
+ v->FractionOfUrgentBandwidth = 0.0;
+ v->FractionOfUrgentBandwidthImmediateFlip = 0.0;
+
+ v->VStartupLines = 13;
+
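+	// Search for a workable prefetch schedule: start from 13 lines of VStartup and
+	// add a line per iteration until the schedule fits the available return
+	// bandwidth or the largest per-plane MaxVStartupLines is exceeded.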
+ do {
+ MaxTotalRDBandwidth = 0;
+ MaxTotalRDBandwidthNoUrgentBurst = 0;
+ DestinationLineTimesForPrefetchLessThan2 = false;
+ VRatioPrefetchMoreThan4 = false;
+ TWait = CalculateTWait(
+ PrefetchMode,
+ v->FinalDRAMClockChangeLatency,
+ v->UrgentLatency,
+ v->SREnterPlusExitTime);
+
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ Pipe myPipe = { 0 };
+
+ myPipe.DPPCLK = v->DPPCLK[k];
+ myPipe.DISPCLK = v->DISPCLK;
+ myPipe.PixelClock = v->PixelClock[k];
+ myPipe.DCFCLKDeepSleep = v->DCFCLKDeepSleep;
+ myPipe.DPPPerPlane = v->DPPPerPlane[k];
+ myPipe.ScalerEnabled = v->ScalerEnabled[k];
+ myPipe.SourceScan = v->SourceScan[k];
+ myPipe.BlockWidth256BytesY = v->BlockWidth256BytesY[k];
+ myPipe.BlockHeight256BytesY = v->BlockHeight256BytesY[k];
+ myPipe.BlockWidth256BytesC = v->BlockWidth256BytesC[k];
+ myPipe.BlockHeight256BytesC = v->BlockHeight256BytesC[k];
+ myPipe.InterlaceEnable = v->Interlace[k];
+ myPipe.NumberOfCursors = v->NumberOfCursors[k];
+ myPipe.VBlank = v->VTotal[k] - v->VActive[k];
+ myPipe.HTotal = v->HTotal[k];
+ myPipe.DCCEnable = v->DCCEnable[k];
+ myPipe.ODMCombineEnabled = !!v->ODMCombineEnabled[k];
+
+ v->ErrorResult[k] = CalculatePrefetchSchedule(
+ mode_lib,
+ v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ &myPipe,
+ v->DSCDelay[k],
+ v->DPPCLKDelaySubtotal
+ + v->DPPCLKDelayCNVCFormater,
+ v->DPPCLKDelaySCL,
+ v->DPPCLKDelaySCLLBOnly,
+ v->DPPCLKDelayCNVCCursor,
+ v->DISPCLKDelaySubtotal,
+ (unsigned int) (v->SwathWidthY[k] / v->HRatio[k]),
+ v->OutputFormat[k],
+ v->MaxInterDCNTileRepeaters,
+ dml_min(v->VStartupLines, v->MaxVStartupLines[k]),
+ v->MaxVStartupLines[k],
+ v->GPUVMMaxPageTableLevels,
+ v->GPUVMEnable,
+ v->HostVMEnable,
+ v->HostVMMaxNonCachedPageTableLevels,
+ v->HostVMMinPageSize,
+ v->DynamicMetadataEnable[k],
+ v->DynamicMetadataVMEnabled,
+ v->DynamicMetadataLinesBeforeActiveRequired[k],
+ v->DynamicMetadataTransmittedBytes[k],
+ v->UrgentLatency,
+ v->UrgentExtraLatency,
+ v->TCalc,
+ v->PDEAndMetaPTEBytesFrame[k],
+ v->MetaRowByte[k],
+ v->PixelPTEBytesPerRow[k],
+ v->PrefetchSourceLinesY[k],
+ v->SwathWidthY[k],
+ v->BytePerPixelY[k],
+ v->VInitPreFillY[k],
+ v->MaxNumSwathY[k],
+ v->PrefetchSourceLinesC[k],
+ v->SwathWidthC[k],
+ v->BytePerPixelC[k],
+ v->VInitPreFillC[k],
+ v->MaxNumSwathC[k],
+ v->swath_width_luma_ub[k],
+ v->swath_width_chroma_ub[k],
+ v->SwathHeightY[k],
+ v->SwathHeightC[k],
+ TWait,
+ v->ProgressiveToInterlaceUnitInOPP,
+ &v->DSTXAfterScaler[k],
+ &v->DSTYAfterScaler[k],
+ &v->DestinationLinesForPrefetch[k],
+ &v->PrefetchBandwidth[k],
+ &v->DestinationLinesToRequestVMInVBlank[k],
+ &v->DestinationLinesToRequestRowInVBlank[k],
+ &v->VRatioPrefetchY[k],
+ &v->VRatioPrefetchC[k],
+ &v->RequiredPrefetchPixDataBWLuma[k],
+ &v->RequiredPrefetchPixDataBWChroma[k],
+ &v->NotEnoughTimeForDynamicMetadata,
+ &v->Tno_bw[k],
+ &v->prefetch_vmrow_bw[k],
+ &v->Tdmdl_vm[k],
+ &v->Tdmdl[k],
+ &v->VUpdateOffsetPix[k],
+ &v->VUpdateWidthPix[k],
+ &v->VReadyOffsetPix[k]);
+ if (v->BlendingAndTiming[k] == k) {
+ double TotalRepeaterDelayTime = v->MaxInterDCNTileRepeaters * (2 / v->DPPCLK[k] + 3 / v->DISPCLK);
+ v->VUpdateWidthPix[k] = (14 / v->DCFCLKDeepSleep + 12 / v->DPPCLK[k] + TotalRepeaterDelayTime) * v->PixelClock[k];
+ v->VReadyOffsetPix[k] = dml_max(150.0 / v->DPPCLK[k], TotalRepeaterDelayTime + 20 / v->DCFCLKDeepSleep + 10 / v->DPPCLK[k]) * v->PixelClock[k];
+ v->VUpdateOffsetPix[k] = dml_ceil(v->HTotal[k] / 4.0, 1);
+ v->VStartup[k] = dml_min(v->VStartupLines, v->MaxVStartupLines[k]);
+ } else {
+ int x = v->BlendingAndTiming[k];
+ double TotalRepeaterDelayTime = v->MaxInterDCNTileRepeaters * (2 / v->DPPCLK[k] + 3 / v->DISPCLK);
+ v->VUpdateWidthPix[k] = (14 / v->DCFCLKDeepSleep + 12 / v->DPPCLK[k] + TotalRepeaterDelayTime) * v->PixelClock[x];
+ v->VReadyOffsetPix[k] = dml_max(150.0 / v->DPPCLK[k], TotalRepeaterDelayTime + 20 / v->DCFCLKDeepSleep + 10 / v->DPPCLK[k]) * v->PixelClock[x];
+ v->VUpdateOffsetPix[k] = dml_ceil(v->HTotal[x] / 4.0, 1);
+ if (!v->MaxVStartupLines[x])
+ v->MaxVStartupLines[x] = v->MaxVStartupLines[k];
+ v->VStartup[k] = dml_min(v->VStartupLines, v->MaxVStartupLines[x]);
+ }
+ }
+
+ v->NotEnoughUrgentLatencyHiding = false;
+ v->NotEnoughUrgentLatencyHidingPre = false;
+
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ v->cursor_bw[k] = v->NumberOfCursors[k]
+ * v->CursorWidth[k][0] * v->CursorBPP[k][0]
+ / 8.0
+ / (v->HTotal[k] / v->PixelClock[k])
+ * v->VRatio[k];
+ v->cursor_bw_pre[k] = v->NumberOfCursors[k]
+ * v->CursorWidth[k][0] * v->CursorBPP[k][0]
+ / 8.0
+ / (v->HTotal[k] / v->PixelClock[k])
+ * v->VRatioPrefetchY[k];
+
+ CalculateUrgentBurstFactor(
+ v->swath_width_luma_ub[k],
+ v->swath_width_chroma_ub[k],
+ v->DETBufferSizeInKByte,
+ v->SwathHeightY[k],
+ v->SwathHeightC[k],
+ v->HTotal[k] / v->PixelClock[k],
+ v->UrgentLatency,
+ v->CursorBufferSize,
+ v->CursorWidth[k][0],
+ v->CursorBPP[k][0],
+ v->VRatio[k],
+ v->VRatioChroma[k],
+ v->BytePerPixelDETY[k],
+ v->BytePerPixelDETC[k],
+ v->DETBufferSizeY[k],
+ v->DETBufferSizeC[k],
+ &v->UrgentBurstFactorCursor[k],
+ &v->UrgentBurstFactorLuma[k],
+ &v->UrgentBurstFactorChroma[k],
+ &v->NoUrgentLatencyHiding[k]);
+
+ CalculateUrgentBurstFactor(
+ v->swath_width_luma_ub[k],
+ v->swath_width_chroma_ub[k],
+ v->DETBufferSizeInKByte,
+ v->SwathHeightY[k],
+ v->SwathHeightC[k],
+ v->HTotal[k] / v->PixelClock[k],
+ v->UrgentLatency,
+ v->CursorBufferSize,
+ v->CursorWidth[k][0],
+ v->CursorBPP[k][0],
+ v->VRatioPrefetchY[k],
+ v->VRatioPrefetchC[k],
+ v->BytePerPixelDETY[k],
+ v->BytePerPixelDETC[k],
+ v->DETBufferSizeY[k],
+ v->DETBufferSizeC[k],
+ &v->UrgentBurstFactorCursorPre[k],
+ &v->UrgentBurstFactorLumaPre[k],
+ &v->UrgentBurstFactorChromaPre[k],
+ &v->NoUrgentLatencyHidingPre[k]);
+
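+			// Per-plane demand is the worst of: VM/row prefetch bandwidth, active
+			// read bandwidth (with urgent burst factors) plus row fetch bandwidth,
+			// and prefetch pixel bandwidth; the sum over all planes must fit in
+			// ReturnBW for this VStartup to be supported.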
+ MaxTotalRDBandwidth = MaxTotalRDBandwidth +
+ dml_max3(v->DPPPerPlane[k] * v->prefetch_vmrow_bw[k],
+ v->ReadBandwidthPlaneLuma[k] *
+ v->UrgentBurstFactorLuma[k] +
+ v->ReadBandwidthPlaneChroma[k] *
+ v->UrgentBurstFactorChroma[k] +
+ v->cursor_bw[k] *
+ v->UrgentBurstFactorCursor[k] +
+ v->DPPPerPlane[k] * (v->meta_row_bw[k] + v->dpte_row_bw[k]),
+ v->DPPPerPlane[k] * (v->RequiredPrefetchPixDataBWLuma[k] * v->UrgentBurstFactorLumaPre[k] +
+ v->RequiredPrefetchPixDataBWChroma[k] * v->UrgentBurstFactorChromaPre[k]) + v->cursor_bw_pre[k] *
+ v->UrgentBurstFactorCursorPre[k]);
+
+ MaxTotalRDBandwidthNoUrgentBurst = MaxTotalRDBandwidthNoUrgentBurst +
+ dml_max3(v->DPPPerPlane[k] * v->prefetch_vmrow_bw[k],
+ v->ReadBandwidthPlaneLuma[k] +
+ v->ReadBandwidthPlaneChroma[k] +
+ v->cursor_bw[k] +
+ v->DPPPerPlane[k] * (v->meta_row_bw[k] + v->dpte_row_bw[k]),
+ v->DPPPerPlane[k] * (v->RequiredPrefetchPixDataBWLuma[k] + v->RequiredPrefetchPixDataBWChroma[k]) + v->cursor_bw_pre[k]);
+
+ if (v->DestinationLinesForPrefetch[k] < 2)
+ DestinationLineTimesForPrefetchLessThan2 = true;
+ if (v->VRatioPrefetchY[k] > 4 || v->VRatioPrefetchC[k] > 4)
+ VRatioPrefetchMoreThan4 = true;
+ if (v->NoUrgentLatencyHiding[k] == true)
+ v->NotEnoughUrgentLatencyHiding = true;
+
+ if (v->NoUrgentLatencyHidingPre[k] == true)
+ v->NotEnoughUrgentLatencyHidingPre = true;
+ }
+ v->FractionOfUrgentBandwidth = MaxTotalRDBandwidthNoUrgentBurst / v->ReturnBW;
+
+
+ if (MaxTotalRDBandwidth <= v->ReturnBW && v->NotEnoughUrgentLatencyHiding == 0 && v->NotEnoughUrgentLatencyHidingPre == 0 && v->NotEnoughTimeForDynamicMetadata == 0 && !VRatioPrefetchMoreThan4
+ && !DestinationLineTimesForPrefetchLessThan2)
+ v->PrefetchModeSupported = true;
+ else {
+ v->PrefetchModeSupported = false;
+ dml_print("DML: CalculatePrefetchSchedule ***failed***. Bandwidth violation. Results are NOT valid\n");
+ dml_print("DML: MaxTotalRDBandwidth:%f AvailReturnBandwidth:%f\n", MaxTotalRDBandwidth, v->ReturnBW);
+ dml_print("DML: VRatioPrefetch %s more than 4\n", (VRatioPrefetchMoreThan4) ? "is" : "is not");
+ dml_print("DML: DestinationLines for Prefetch %s less than 2\n", (DestinationLineTimesForPrefetchLessThan2) ? "is" : "is not");
+ dml_print("DML: Not enough lines for dynamic meta is %s\n", (v->NotEnoughTimeForDynamicMetadata) ? "true" : "false");
+
+ }
+
+ if (v->PrefetchModeSupported == true && v->ImmediateFlipSupport == true) {
+ v->BandwidthAvailableForImmediateFlip = v->ReturnBW;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ v->BandwidthAvailableForImmediateFlip =
+ v->BandwidthAvailableForImmediateFlip
+ - dml_max(
+ v->ReadBandwidthPlaneLuma[k] * v->UrgentBurstFactorLuma[k]
+ + v->ReadBandwidthPlaneChroma[k] * v->UrgentBurstFactorChroma[k]
+ + v->cursor_bw[k] * v->UrgentBurstFactorCursor[k],
+ v->DPPPerPlane[k] * (v->RequiredPrefetchPixDataBWLuma[k] * v->UrgentBurstFactorLumaPre[k] +
+ v->RequiredPrefetchPixDataBWChroma[k] * v->UrgentBurstFactorChromaPre[k]) +
+ v->cursor_bw_pre[k] * v->UrgentBurstFactorCursorPre[k]);
+ }
+
+ v->TotImmediateFlipBytes = 0;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ v->TotImmediateFlipBytes = v->TotImmediateFlipBytes + v->DPPPerPlane[k] * (v->PDEAndMetaPTEBytesFrame[k] + v->MetaRowByte[k] + v->PixelPTEBytesPerRow[k]);
+ }
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ CalculateFlipSchedule(
+ mode_lib,
+ v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ v->UrgentExtraLatency,
+ v->UrgentLatency,
+ v->GPUVMMaxPageTableLevels,
+ v->HostVMEnable,
+ v->HostVMMaxNonCachedPageTableLevels,
+ v->GPUVMEnable,
+ v->HostVMMinPageSize,
+ v->PDEAndMetaPTEBytesFrame[k],
+ v->MetaRowByte[k],
+ v->PixelPTEBytesPerRow[k],
+ v->BandwidthAvailableForImmediateFlip,
+ v->TotImmediateFlipBytes,
+ v->SourcePixelFormat[k],
+ v->HTotal[k] / v->PixelClock[k],
+ v->VRatio[k],
+ v->VRatioChroma[k],
+ v->Tno_bw[k],
+ v->DCCEnable[k],
+ v->dpte_row_height[k],
+ v->meta_row_height[k],
+ v->dpte_row_height_chroma[k],
+ v->meta_row_height_chroma[k],
+ &v->DestinationLinesToRequestVMInImmediateFlip[k],
+ &v->DestinationLinesToRequestRowInImmediateFlip[k],
+ &v->final_flip_bw[k],
+ &v->ImmediateFlipSupportedForPipe[k]);
+ }
+ v->total_dcn_read_bw_with_flip = 0.0;
+ v->total_dcn_read_bw_with_flip_no_urgent_burst = 0.0;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ v->total_dcn_read_bw_with_flip = v->total_dcn_read_bw_with_flip + dml_max3(
+ v->DPPPerPlane[k] * v->prefetch_vmrow_bw[k],
+ v->DPPPerPlane[k] * v->final_flip_bw[k] +
+ v->ReadBandwidthLuma[k] * v->UrgentBurstFactorLuma[k] +
+ v->ReadBandwidthChroma[k] * v->UrgentBurstFactorChroma[k] +
+ v->cursor_bw[k] * v->UrgentBurstFactorCursor[k],
+ v->DPPPerPlane[k] * (v->final_flip_bw[k] +
+ v->RequiredPrefetchPixDataBWLuma[k] * v->UrgentBurstFactorLumaPre[k] +
+ v->RequiredPrefetchPixDataBWChroma[k] * v->UrgentBurstFactorChromaPre[k]) +
+ v->cursor_bw_pre[k] * v->UrgentBurstFactorCursorPre[k]);
+ v->total_dcn_read_bw_with_flip_no_urgent_burst =
+ v->total_dcn_read_bw_with_flip_no_urgent_burst +
+ dml_max3(v->DPPPerPlane[k] * v->prefetch_vmrow_bw[k],
+ v->DPPPerPlane[k] * v->final_flip_bw[k] + v->ReadBandwidthPlaneLuma[k] + v->ReadBandwidthPlaneChroma[k] + v->cursor_bw[k],
+ v->DPPPerPlane[k] * (v->final_flip_bw[k] + v->RequiredPrefetchPixDataBWLuma[k] + v->RequiredPrefetchPixDataBWChroma[k]) + v->cursor_bw_pre[k]);
+
+ }
+ v->FractionOfUrgentBandwidthImmediateFlip = v->total_dcn_read_bw_with_flip_no_urgent_burst / v->ReturnBW;
+
+ v->ImmediateFlipSupported = true;
+ if (v->total_dcn_read_bw_with_flip > v->ReturnBW) {
+ v->ImmediateFlipSupported = false;
+ v->total_dcn_read_bw_with_flip = MaxTotalRDBandwidth;
+ }
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->ImmediateFlipSupportedForPipe[k] == false) {
+ v->ImmediateFlipSupported = false;
+ }
+ }
+ } else {
+ v->ImmediateFlipSupported = false;
+ }
+
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->ErrorResult[k]) {
+ v->PrefetchModeSupported = false;
+ dml_print("DML: CalculatePrefetchSchedule ***failed***. Prefetch schedule violation. Results are NOT valid\n");
+ }
+ }
+
+ v->VStartupLines = v->VStartupLines + 1;
+ v->PrefetchAndImmediateFlipSupported = (v->PrefetchModeSupported == true && ((!v->ImmediateFlipSupport && !v->HostVMEnable && v->ImmediateFlipRequirement != dm_immediate_flip_required) || v->ImmediateFlipSupported)) ? true : false;
+
+ } while (!v->PrefetchModeSupported && v->VStartupLines <= v->MaximumMaxVStartupLines);
+ ASSERT(v->PrefetchModeSupported);
+
+ //Watermarks and NB P-State/DRAM Clock Change Support
+ {
+ enum clock_change_support DRAMClockChangeSupport = 0; // dummy
+ CalculateWatermarksAndDRAMSpeedChangeSupport(
+ mode_lib,
+ PrefetchMode,
+ v->NumberOfActivePlanes,
+ v->MaxLineBufferLines,
+ v->LineBufferSize,
+ v->DPPOutputBufferPixels,
+ v->DETBufferSizeInKByte,
+ v->WritebackInterfaceBufferSize,
+ v->DCFCLK,
+ v->ReturnBW,
+ v->GPUVMEnable,
+ v->dpte_group_bytes,
+ v->MetaChunkSize,
+ v->UrgentLatency,
+ v->UrgentExtraLatency,
+ v->WritebackLatency,
+ v->WritebackChunkSize,
+ v->SOCCLK,
+ v->FinalDRAMClockChangeLatency,
+ v->SRExitTime,
+ v->SREnterPlusExitTime,
+ v->DCFCLKDeepSleep,
+ v->DPPPerPlane,
+ v->DCCEnable,
+ v->DPPCLK,
+ v->DETBufferSizeY,
+ v->DETBufferSizeC,
+ v->SwathHeightY,
+ v->SwathHeightC,
+ v->LBBitPerPixel,
+ v->SwathWidthY,
+ v->SwathWidthC,
+ v->HRatio,
+ v->HRatioChroma,
+ v->vtaps,
+ v->VTAPsChroma,
+ v->VRatio,
+ v->VRatioChroma,
+ v->HTotal,
+ v->PixelClock,
+ v->BlendingAndTiming,
+ v->BytePerPixelDETY,
+ v->BytePerPixelDETC,
+ v->DSTXAfterScaler,
+ v->DSTYAfterScaler,
+ v->WritebackEnable,
+ v->WritebackPixelFormat,
+ v->WritebackDestinationWidth,
+ v->WritebackDestinationHeight,
+ v->WritebackSourceHeight,
+ &DRAMClockChangeSupport,
+ &v->UrgentWatermark,
+ &v->WritebackUrgentWatermark,
+ &v->DRAMClockChangeWatermark,
+ &v->WritebackDRAMClockChangeWatermark,
+ &v->StutterExitWatermark,
+ &v->StutterEnterPlusExitWatermark,
+ &v->MinActiveDRAMClockChangeLatencySupported);
+
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->WritebackEnable[k] == true) {
+ if (v->BlendingAndTiming[k] == k) {
+ v->ThisVStartup = v->VStartup[k];
+ } else {
+ for (j = 0; j < v->NumberOfActivePlanes; ++j) {
+ if (v->BlendingAndTiming[k] == j) {
+ v->ThisVStartup = v->VStartup[j];
+ }
+ }
+ }
+ v->WritebackAllowDRAMClockChangeEndPosition[k] = dml_max(0,
+ v->ThisVStartup * v->HTotal[k] / v->PixelClock[k] - v->WritebackDRAMClockChangeWatermark);
+ } else {
+ v->WritebackAllowDRAMClockChangeEndPosition[k] = 0;
+ }
+ }
+
+ }
+
+
+ //Display Pipeline Delivery Time in Prefetch, Groups
+ CalculatePixelDeliveryTimes(
+ v->NumberOfActivePlanes,
+ v->VRatio,
+ v->VRatioChroma,
+ v->VRatioPrefetchY,
+ v->VRatioPrefetchC,
+ v->swath_width_luma_ub,
+ v->swath_width_chroma_ub,
+ v->DPPPerPlane,
+ v->HRatio,
+ v->HRatioChroma,
+ v->PixelClock,
+ v->PSCL_THROUGHPUT_LUMA,
+ v->PSCL_THROUGHPUT_CHROMA,
+ v->DPPCLK,
+ v->BytePerPixelC,
+ v->SourceScan,
+ v->NumberOfCursors,
+ v->CursorWidth,
+ v->CursorBPP,
+ v->BlockWidth256BytesY,
+ v->BlockHeight256BytesY,
+ v->BlockWidth256BytesC,
+ v->BlockHeight256BytesC,
+ v->DisplayPipeLineDeliveryTimeLuma,
+ v->DisplayPipeLineDeliveryTimeChroma,
+ v->DisplayPipeLineDeliveryTimeLumaPrefetch,
+ v->DisplayPipeLineDeliveryTimeChromaPrefetch,
+ v->DisplayPipeRequestDeliveryTimeLuma,
+ v->DisplayPipeRequestDeliveryTimeChroma,
+ v->DisplayPipeRequestDeliveryTimeLumaPrefetch,
+ v->DisplayPipeRequestDeliveryTimeChromaPrefetch,
+ v->CursorRequestDeliveryTime,
+ v->CursorRequestDeliveryTimePrefetch);
+
+ CalculateMetaAndPTETimes(
+ v->NumberOfActivePlanes,
+ v->GPUVMEnable,
+ v->MetaChunkSize,
+ v->MinMetaChunkSizeBytes,
+ v->HTotal,
+ v->VRatio,
+ v->VRatioChroma,
+ v->DestinationLinesToRequestRowInVBlank,
+ v->DestinationLinesToRequestRowInImmediateFlip,
+ v->DCCEnable,
+ v->PixelClock,
+ v->BytePerPixelY,
+ v->BytePerPixelC,
+ v->SourceScan,
+ v->dpte_row_height,
+ v->dpte_row_height_chroma,
+ v->meta_row_width,
+ v->meta_row_width_chroma,
+ v->meta_row_height,
+ v->meta_row_height_chroma,
+ v->meta_req_width,
+ v->meta_req_width_chroma,
+ v->meta_req_height,
+ v->meta_req_height_chroma,
+ v->dpte_group_bytes,
+ v->PTERequestSizeY,
+ v->PTERequestSizeC,
+ v->PixelPTEReqWidthY,
+ v->PixelPTEReqHeightY,
+ v->PixelPTEReqWidthC,
+ v->PixelPTEReqHeightC,
+ v->dpte_row_width_luma_ub,
+ v->dpte_row_width_chroma_ub,
+ v->DST_Y_PER_PTE_ROW_NOM_L,
+ v->DST_Y_PER_PTE_ROW_NOM_C,
+ v->DST_Y_PER_META_ROW_NOM_L,
+ v->DST_Y_PER_META_ROW_NOM_C,
+ v->TimePerMetaChunkNominal,
+ v->TimePerChromaMetaChunkNominal,
+ v->TimePerMetaChunkVBlank,
+ v->TimePerChromaMetaChunkVBlank,
+ v->TimePerMetaChunkFlip,
+ v->TimePerChromaMetaChunkFlip,
+ v->time_per_pte_group_nom_luma,
+ v->time_per_pte_group_vblank_luma,
+ v->time_per_pte_group_flip_luma,
+ v->time_per_pte_group_nom_chroma,
+ v->time_per_pte_group_vblank_chroma,
+ v->time_per_pte_group_flip_chroma);
+
+ CalculateVMGroupAndRequestTimes(
+ v->NumberOfActivePlanes,
+ v->GPUVMEnable,
+ v->GPUVMMaxPageTableLevels,
+ v->HTotal,
+ v->BytePerPixelC,
+ v->DestinationLinesToRequestVMInVBlank,
+ v->DestinationLinesToRequestVMInImmediateFlip,
+ v->DCCEnable,
+ v->PixelClock,
+ v->dpte_row_width_luma_ub,
+ v->dpte_row_width_chroma_ub,
+ v->vm_group_bytes,
+ v->dpde0_bytes_per_frame_ub_l,
+ v->dpde0_bytes_per_frame_ub_c,
+ v->meta_pte_bytes_per_frame_ub_l,
+ v->meta_pte_bytes_per_frame_ub_c,
+ v->TimePerVMGroupVBlank,
+ v->TimePerVMGroupFlip,
+ v->TimePerVMRequestVBlank,
+ v->TimePerVMRequestFlip);
+
+
+ // Min TTUVBlank
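+	// PrefetchMode selects what must be hidden during VBlank: mode 0 allows both
+	// DRAM clock (p-state) changes and self refresh, mode 1 only self refresh,
+	// mode 2 neither, so the corresponding watermark sets the minimum TTU budget.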
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (PrefetchMode == 0) {
+ v->AllowDRAMClockChangeDuringVBlank[k] = true;
+ v->AllowDRAMSelfRefreshDuringVBlank[k] = true;
+ v->MinTTUVBlank[k] = dml_max(
+ v->DRAMClockChangeWatermark,
+ dml_max(
+ v->StutterEnterPlusExitWatermark,
+ v->UrgentWatermark));
+ } else if (PrefetchMode == 1) {
+ v->AllowDRAMClockChangeDuringVBlank[k] = false;
+ v->AllowDRAMSelfRefreshDuringVBlank[k] = true;
+ v->MinTTUVBlank[k] = dml_max(
+ v->StutterEnterPlusExitWatermark,
+ v->UrgentWatermark);
+ } else {
+ v->AllowDRAMClockChangeDuringVBlank[k] = false;
+ v->AllowDRAMSelfRefreshDuringVBlank[k] = false;
+ v->MinTTUVBlank[k] = v->UrgentWatermark;
+ }
+ if (!v->DynamicMetadataEnable[k])
+ v->MinTTUVBlank[k] = v->TCalc
+ + v->MinTTUVBlank[k];
+ }
+
+ // DCC Configuration
+ v->ActiveDPPs = 0;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+		CalculateDCCConfiguration(v->DCCEnable[k], false /* DCCProgrammingAssumesScanDirectionUnknown: we should always know the direction */,
+ v->SourcePixelFormat[k],
+ v->SurfaceWidthY[k],
+ v->SurfaceWidthC[k],
+ v->SurfaceHeightY[k],
+ v->SurfaceHeightC[k],
+ v->DETBufferSizeInKByte * 1024,
+ v->BlockHeight256BytesY[k],
+ v->BlockHeight256BytesC[k],
+ v->SurfaceTiling[k],
+ v->BytePerPixelY[k],
+ v->BytePerPixelC[k],
+ v->BytePerPixelDETY[k],
+ v->BytePerPixelDETC[k],
+ v->SourceScan[k],
+ &v->DCCYMaxUncompressedBlock[k],
+ &v->DCCCMaxUncompressedBlock[k],
+ &v->DCCYMaxCompressedBlock[k],
+ &v->DCCCMaxCompressedBlock[k],
+ &v->DCCYIndependentBlock[k],
+ &v->DCCCIndependentBlock[k]);
+ }
+
+ {
+ //Maximum Bandwidth Used
+ double TotalWRBandwidth = 0;
+ double MaxPerPlaneVActiveWRBandwidth = 0;
+ double WRBandwidth = 0;
+ double MaxUsedBW = 0;
+		for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+			WRBandwidth = 0;	// planes without writeback must not inherit the previous plane's value
+			if (v->WritebackEnable[k] == true
+					&& v->WritebackPixelFormat[k] == dm_444_32) {
+				WRBandwidth = v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k]
+						/ (v->HTotal[k] * v->WritebackSourceHeight[k] / v->PixelClock[k]) * 4;
+			} else if (v->WritebackEnable[k] == true) {
+				WRBandwidth = v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k]
+						/ (v->HTotal[k] * v->WritebackSourceHeight[k] / v->PixelClock[k]) * 8;
+			}
+			TotalWRBandwidth = TotalWRBandwidth + WRBandwidth;
+			MaxPerPlaneVActiveWRBandwidth = dml_max(MaxPerPlaneVActiveWRBandwidth, WRBandwidth);
+		}
+
+ v->TotalDataReadBandwidth = 0;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ v->TotalDataReadBandwidth = v->TotalDataReadBandwidth
+ + v->ReadBandwidthPlaneLuma[k]
+ + v->ReadBandwidthPlaneChroma[k];
+ }
+
+ {
+ double MaxPerPlaneVActiveRDBandwidth = 0;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ MaxPerPlaneVActiveRDBandwidth = dml_max(MaxPerPlaneVActiveRDBandwidth,
+ v->ReadBandwidthPlaneLuma[k] + v->ReadBandwidthPlaneChroma[k]);
+
+ }
+ }
+
+ MaxUsedBW = MaxTotalRDBandwidth + TotalWRBandwidth;
+ }
+
+ // VStartup Margin
+ v->VStartupMargin = 0;
+ v->FirstMainPlane = true;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->BlendingAndTiming[k] == k) {
+ double margin = (v->MaxVStartupLines[k] - v->VStartup[k]) * v->HTotal[k]
+ / v->PixelClock[k];
+ if (v->FirstMainPlane == true) {
+ v->VStartupMargin = margin;
+ v->FirstMainPlane = false;
+ } else {
+ v->VStartupMargin = dml_min(v->VStartupMargin, margin);
+ }
+ }
+ }
+
+ // Stutter Efficiency
+ CalculateStutterEfficiency(
+ v->NumberOfActivePlanes,
+ v->ROBBufferSizeInKByte,
+ v->TotalDataReadBandwidth,
+ v->DCFCLK,
+ v->ReturnBW,
+ v->SRExitTime,
+ v->SynchronizedVBlank,
+ v->DPPPerPlane,
+ v->DETBufferSizeY,
+ v->BytePerPixelY,
+ v->BytePerPixelDETY,
+ v->SwathWidthY,
+ v->SwathHeightY,
+ v->SwathHeightC,
+ v->DCCRateLuma,
+ v->DCCRateChroma,
+ v->HTotal,
+ v->VTotal,
+ v->PixelClock,
+ v->VRatio,
+ v->SourceScan,
+ v->BlockHeight256BytesY,
+ v->BlockWidth256BytesY,
+ v->BlockHeight256BytesC,
+ v->BlockWidth256BytesC,
+ v->DCCYMaxUncompressedBlock,
+ v->DCCCMaxUncompressedBlock,
+ v->VActive,
+ v->DCCEnable,
+ v->WritebackEnable,
+ v->ReadBandwidthPlaneLuma,
+ v->ReadBandwidthPlaneChroma,
+ v->meta_row_bw,
+ v->dpte_row_bw,
+ &v->StutterEfficiencyNotIncludingVBlank,
+ &v->StutterEfficiency);
+}
+
+static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
+{
+ // Display Pipe Configuration
+ double BytePerPixDETY[DC__NUM_DPP__MAX] = { 0 };
+ double BytePerPixDETC[DC__NUM_DPP__MAX] = { 0 };
+ int BytePerPixY[DC__NUM_DPP__MAX] = { 0 };
+ int BytePerPixC[DC__NUM_DPP__MAX] = { 0 };
+ int Read256BytesBlockHeightY[DC__NUM_DPP__MAX] = { 0 };
+ int Read256BytesBlockHeightC[DC__NUM_DPP__MAX] = { 0 };
+ int Read256BytesBlockWidthY[DC__NUM_DPP__MAX] = { 0 };
+ int Read256BytesBlockWidthC[DC__NUM_DPP__MAX] = { 0 };
+ double dummy1[DC__NUM_DPP__MAX] = { 0 };
+ double dummy2[DC__NUM_DPP__MAX] = { 0 };
+ double dummy3[DC__NUM_DPP__MAX] = { 0 };
+ double dummy4[DC__NUM_DPP__MAX] = { 0 };
+ int dummy5[DC__NUM_DPP__MAX] = { 0 };
+ int dummy6[DC__NUM_DPP__MAX] = { 0 };
+ bool dummy7[DC__NUM_DPP__MAX] = { 0 };
+ bool dummysinglestring = 0;
+ unsigned int k;
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+
+ CalculateBytePerPixelAnd256BBlockSizes(
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ &BytePerPixY[k],
+ &BytePerPixC[k],
+ &BytePerPixDETY[k],
+ &BytePerPixDETC[k],
+ &Read256BytesBlockHeightY[k],
+ &Read256BytesBlockHeightC[k],
+ &Read256BytesBlockWidthY[k],
+ &Read256BytesBlockWidthC[k]);
+ }
+ CalculateSwathAndDETConfiguration(
+ false,
+ mode_lib->vba.NumberOfActivePlanes,
+ mode_lib->vba.DETBufferSizeInKByte,
+ dummy1,
+ dummy2,
+ mode_lib->vba.SourceScan,
+ mode_lib->vba.SourcePixelFormat,
+ mode_lib->vba.SurfaceTiling,
+ mode_lib->vba.ViewportWidth,
+ mode_lib->vba.ViewportHeight,
+ mode_lib->vba.SurfaceWidthY,
+ mode_lib->vba.SurfaceWidthC,
+ mode_lib->vba.SurfaceHeightY,
+ mode_lib->vba.SurfaceHeightC,
+ Read256BytesBlockHeightY,
+ Read256BytesBlockHeightC,
+ Read256BytesBlockWidthY,
+ Read256BytesBlockWidthC,
+ mode_lib->vba.ODMCombineEnabled,
+ mode_lib->vba.BlendingAndTiming,
+ BytePerPixY,
+ BytePerPixC,
+ BytePerPixDETY,
+ BytePerPixDETC,
+ mode_lib->vba.HActive,
+ mode_lib->vba.HRatio,
+ mode_lib->vba.HRatioChroma,
+ mode_lib->vba.DPPPerPlane,
+ dummy5,
+ dummy6,
+ dummy3,
+ dummy4,
+ mode_lib->vba.SwathHeightY,
+ mode_lib->vba.SwathHeightC,
+ mode_lib->vba.DETBufferSizeY,
+ mode_lib->vba.DETBufferSizeC,
+ dummy7,
+ &dummysinglestring);
+}
+
+static bool CalculateBytePerPixelAnd256BBlockSizes(
+ enum source_format_class SourcePixelFormat,
+ enum dm_swizzle_mode SurfaceTiling,
+ unsigned int *BytePerPixelY,
+ unsigned int *BytePerPixelC,
+ double *BytePerPixelDETY,
+ double *BytePerPixelDETC,
+ unsigned int *BlockHeight256BytesY,
+ unsigned int *BlockHeight256BytesC,
+ unsigned int *BlockWidth256BytesY,
+ unsigned int *BlockWidth256BytesC)
+{
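+	// Returns bytes per pixel for the luma and chroma planes (plus the fractional
+	// DET-packing equivalents used for buffer sizing) and the height and width, in
+	// pixels, of one 256-byte fetch block, so BlockHeight * BlockWidth *
+	// BytePerPixel == 256.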
+ if (SourcePixelFormat == dm_444_64) {
+ *BytePerPixelDETY = 8;
+ *BytePerPixelDETC = 0;
+ *BytePerPixelY = 8;
+ *BytePerPixelC = 0;
+ } else if (SourcePixelFormat == dm_444_32 || SourcePixelFormat == dm_rgbe) {
+ *BytePerPixelDETY = 4;
+ *BytePerPixelDETC = 0;
+ *BytePerPixelY = 4;
+ *BytePerPixelC = 0;
+	} else if (SourcePixelFormat == dm_444_16) {
+ *BytePerPixelDETY = 2;
+ *BytePerPixelDETC = 0;
+ *BytePerPixelY = 2;
+ *BytePerPixelC = 0;
+ } else if (SourcePixelFormat == dm_444_8) {
+ *BytePerPixelDETY = 1;
+ *BytePerPixelDETC = 0;
+ *BytePerPixelY = 1;
+ *BytePerPixelC = 0;
+ } else if (SourcePixelFormat == dm_rgbe_alpha) {
+ *BytePerPixelDETY = 4;
+ *BytePerPixelDETC = 1;
+ *BytePerPixelY = 4;
+ *BytePerPixelC = 1;
+ } else if (SourcePixelFormat == dm_420_8) {
+ *BytePerPixelDETY = 1;
+ *BytePerPixelDETC = 2;
+ *BytePerPixelY = 1;
+ *BytePerPixelC = 2;
+ } else if (SourcePixelFormat == dm_420_12) {
+ *BytePerPixelDETY = 2;
+ *BytePerPixelDETC = 4;
+ *BytePerPixelY = 2;
+ *BytePerPixelC = 4;
+ } else {
+ *BytePerPixelDETY = 4.0 / 3;
+ *BytePerPixelDETC = 8.0 / 3;
+ *BytePerPixelY = 2;
+ *BytePerPixelC = 4;
+ }
+
+ if ((SourcePixelFormat == dm_444_64 || SourcePixelFormat == dm_444_32
+ || SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_8
+ || SourcePixelFormat == dm_mono_16 || SourcePixelFormat == dm_mono_8
+ || SourcePixelFormat == dm_rgbe)) {
+ if (SurfaceTiling == dm_sw_linear) {
+ *BlockHeight256BytesY = 1;
+ } else if (SourcePixelFormat == dm_444_64) {
+ *BlockHeight256BytesY = 4;
+ } else if (SourcePixelFormat == dm_444_8) {
+ *BlockHeight256BytesY = 16;
+ } else {
+ *BlockHeight256BytesY = 8;
+ }
+ *BlockWidth256BytesY = 256U / *BytePerPixelY / *BlockHeight256BytesY;
+ *BlockHeight256BytesC = 0;
+ *BlockWidth256BytesC = 0;
+ } else {
+ if (SurfaceTiling == dm_sw_linear) {
+ *BlockHeight256BytesY = 1;
+ *BlockHeight256BytesC = 1;
+ } else if (SourcePixelFormat == dm_rgbe_alpha) {
+ *BlockHeight256BytesY = 8;
+ *BlockHeight256BytesC = 16;
+ } else if (SourcePixelFormat == dm_420_8) {
+ *BlockHeight256BytesY = 16;
+ *BlockHeight256BytesC = 8;
+ } else {
+ *BlockHeight256BytesY = 8;
+ *BlockHeight256BytesC = 8;
+ }
+ *BlockWidth256BytesY = 256U / *BytePerPixelY / *BlockHeight256BytesY;
+ *BlockWidth256BytesC = 256U / *BytePerPixelC / *BlockHeight256BytesC;
+ }
+ return true;
+}
+
+static double CalculateTWait(
+ unsigned int PrefetchMode,
+ double DRAMClockChangeLatency,
+ double UrgentLatency,
+ double SREnterPlusExitTime)
+{
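+	// TWait is the worst-case latency prefetch must absorb before data returns:
+	// PrefetchMode 0 covers a DRAM clock change plus urgent latency (or self-refresh
+	// enter/exit, whichever is larger), mode 1 the self-refresh enter/exit time,
+	// and mode 2 only the urgent latency.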
+ if (PrefetchMode == 0) {
+ return dml_max(DRAMClockChangeLatency + UrgentLatency,
+ dml_max(SREnterPlusExitTime, UrgentLatency));
+ } else if (PrefetchMode == 1) {
+ return dml_max(SREnterPlusExitTime, UrgentLatency);
+ } else {
+ return UrgentLatency;
+ }
+}
+
+double dml30_CalculateWriteBackDISPCLK(
+ enum source_format_class WritebackPixelFormat,
+ double PixelClock,
+ double WritebackHRatio,
+ double WritebackVRatio,
+ unsigned int WritebackHTaps,
+ unsigned int WritebackVTaps,
+ long WritebackSourceWidth,
+ long WritebackDestinationWidth,
+ unsigned int HTotal,
+ unsigned int WritebackLineBufferSize)
+{
+ double DISPCLK_H = 0, DISPCLK_V = 0, DISPCLK_HB = 0;
+
+ DISPCLK_H = PixelClock * dml_ceil(WritebackHTaps / 8.0, 1) / WritebackHRatio;
+ DISPCLK_V = PixelClock * (WritebackVTaps * dml_ceil(WritebackDestinationWidth / 6.0, 1) + 8.0) / HTotal;
+ DISPCLK_HB = PixelClock * WritebackVTaps * (WritebackDestinationWidth * WritebackVTaps - WritebackLineBufferSize / 57.0) / 6.0 / WritebackSourceWidth;
+ return dml_max3(DISPCLK_H, DISPCLK_V, DISPCLK_HB);
+}
+
+static double CalculateWriteBackDelay(
+ enum source_format_class WritebackPixelFormat,
+ double WritebackHRatio,
+ double WritebackVRatio,
+ unsigned int WritebackVTaps,
+ long WritebackDestinationWidth,
+ long WritebackDestinationHeight,
+ long WritebackSourceHeight,
+ unsigned int HTotal)
+{
+ double CalculateWriteBackDelay = 0;
+ double Line_length = 0;
+ double Output_lines_last_notclamped = 0;
+ double WritebackVInit = 0;
+
+ WritebackVInit = (WritebackVRatio + WritebackVTaps + 1) / 2;
+ Line_length = dml_max((double) WritebackDestinationWidth, dml_ceil(WritebackDestinationWidth / 6.0, 1) * WritebackVTaps);
+ Output_lines_last_notclamped = WritebackDestinationHeight - 1 - dml_ceil((WritebackSourceHeight - WritebackVInit) / WritebackVRatio, 1);
+ if (Output_lines_last_notclamped < 0) {
+ CalculateWriteBackDelay = 0;
+ } else {
+ CalculateWriteBackDelay = Output_lines_last_notclamped * Line_length + (HTotal - WritebackDestinationWidth) + 80;
+ }
+ return CalculateWriteBackDelay;
+}
+
+
+static void CalculateDynamicMetadataParameters(int MaxInterDCNTileRepeaters, double DPPCLK, double DISPCLK,
+ double DCFClkDeepSleep, double PixelClock, long HTotal, long VBlank, long DynamicMetadataTransmittedBytes,
+ long DynamicMetadataLinesBeforeActiveRequired, int InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP,
+ double *Tsetup, double *Tdmbf, double *Tdmec, double *Tdmsks)
+{
+ double TotalRepeaterDelayTime = 0;
+ double VUpdateWidthPix = 0;
+ double VReadyOffsetPix = 0;
+ double VUpdateOffsetPix = 0;
+ TotalRepeaterDelayTime = MaxInterDCNTileRepeaters * (2 / DPPCLK + 3 / DISPCLK);
+ VUpdateWidthPix = (14 / DCFClkDeepSleep + 12 / DPPCLK + TotalRepeaterDelayTime) * PixelClock;
+ VReadyOffsetPix = dml_max(150.0 / DPPCLK, TotalRepeaterDelayTime + 20 / DCFClkDeepSleep + 10 / DPPCLK) * PixelClock;
+ VUpdateOffsetPix = dml_ceil(HTotal / 4.0, 1);
+ *Tsetup = (VUpdateOffsetPix + VUpdateWidthPix + VReadyOffsetPix) / PixelClock;
+ *Tdmbf = DynamicMetadataTransmittedBytes / 4.0 / DISPCLK;
+ *Tdmec = HTotal / PixelClock;
+ if (DynamicMetadataLinesBeforeActiveRequired == 0) {
+ *Tdmsks = VBlank * HTotal / PixelClock / 2.0;
+ } else {
+ *Tdmsks = DynamicMetadataLinesBeforeActiveRequired * HTotal / PixelClock;
+ }
+ if (InterlaceEnable == 1 && ProgressiveToInterlaceUnitInOPP == false) {
+ *Tdmsks = *Tdmsks / 2;
+ }
+}
+
+static void CalculateRowBandwidth(
+ bool GPUVMEnable,
+ enum source_format_class SourcePixelFormat,
+ double VRatio,
+ double VRatioChroma,
+ bool DCCEnable,
+ double LineTime,
+ unsigned int MetaRowByteLuma,
+ unsigned int MetaRowByteChroma,
+ unsigned int meta_row_height_luma,
+ unsigned int meta_row_height_chroma,
+ unsigned int PixelPTEBytesPerRowLuma,
+ unsigned int PixelPTEBytesPerRowChroma,
+ unsigned int dpte_row_height_luma,
+ unsigned int dpte_row_height_chroma,
+ double *meta_row_bw,
+ double *dpte_row_bw)
+{
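+	// meta_row_bw / dpte_row_bw are the average bandwidths needed to fetch one DCC
+	// metadata row or one PTE row every meta_row_height / dpte_row_height lines of
+	// scan-out, scaled by the vertical ratio; 4:2:0 and rgbe_alpha sources add the
+	// separate chroma plane's rows.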
+ if (DCCEnable != true) {
+ *meta_row_bw = 0;
+ } else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_420_12 || SourcePixelFormat == dm_rgbe_alpha) {
+ *meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime)
+ + VRatioChroma * MetaRowByteChroma
+ / (meta_row_height_chroma * LineTime);
+ } else {
+ *meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime);
+ }
+
+ if (GPUVMEnable != true) {
+ *dpte_row_bw = 0;
+ } else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_420_12 || SourcePixelFormat == dm_rgbe_alpha) {
+ *dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime)
+ + VRatioChroma * PixelPTEBytesPerRowChroma
+ / (dpte_row_height_chroma * LineTime);
+ } else {
+ *dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime);
+ }
+}
+
+static void CalculateFlipSchedule(
+ struct display_mode_lib *mode_lib,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ double UrgentExtraLatency,
+ double UrgentLatency,
+ unsigned int GPUVMMaxPageTableLevels,
+ bool HostVMEnable,
+ unsigned int HostVMMaxNonCachedPageTableLevels,
+ bool GPUVMEnable,
+ double HostVMMinPageSize,
+ double PDEAndMetaPTEBytesPerFrame,
+ double MetaRowBytes,
+ double DPTEBytesPerRow,
+ double BandwidthAvailableForImmediateFlip,
+ unsigned int TotImmediateFlipBytes,
+ enum source_format_class SourcePixelFormat,
+ double LineTime,
+ double VRatio,
+ double VRatioChroma,
+ double Tno_bw,
+ bool DCCEnable,
+ unsigned int dpte_row_height,
+ unsigned int meta_row_height,
+ unsigned int dpte_row_height_chroma,
+ unsigned int meta_row_height_chroma,
+ double *DestinationLinesToRequestVMInImmediateFlip,
+ double *DestinationLinesToRequestRowInImmediateFlip,
+ double *final_flip_bw,
+ bool *ImmediateFlipSupportedForPipe)
+{
+ double min_row_time = 0.0;
+ unsigned int HostVMDynamicLevelsTrips = 0;
+ double TimeForFetchingMetaPTEImmediateFlip = 0;
+ double TimeForFetchingRowInVBlankImmediateFlip = 0;
+ double ImmediateFlipBW = 0;
+ double HostVMInefficiencyFactor = 0;
+
+ if (GPUVMEnable == true && HostVMEnable == true) {
+ HostVMInefficiencyFactor = PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly;
+ HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+ } else {
+ HostVMInefficiencyFactor = 1;
+ HostVMDynamicLevelsTrips = 0;
+ }
+
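+	// Each plane gets a share of the immediate-flip bandwidth proportional to its
+	// VM + metadata + PTE row bytes within TotImmediateFlipBytes; the resulting VM
+	// and row fetch times are rounded up to quarter-line granularity below.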
+ if (GPUVMEnable == true || DCCEnable == true) {
+ ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes;
+ }
+
+ if (GPUVMEnable == true) {
+ TimeForFetchingMetaPTEImmediateFlip = dml_max3(Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
+ UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1), LineTime / 4.0);
+ } else {
+ TimeForFetchingMetaPTEImmediateFlip = 0;
+ }
+
+ *DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
+ if ((GPUVMEnable == true || DCCEnable == true)) {
+ TimeForFetchingRowInVBlankImmediateFlip = dml_max3((MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW,
+ UrgentLatency * (HostVMDynamicLevelsTrips + 1), LineTime / 4);
+ } else {
+ TimeForFetchingRowInVBlankImmediateFlip = 0;
+ }
+
+ *DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
+
+ if (GPUVMEnable == true) {
+ *final_flip_bw = dml_max(PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime),
+ (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
+ } else if ((GPUVMEnable == true || DCCEnable == true)) {
+ *final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime);
+ } else {
+ *final_flip_bw = 0;
+ }
+
+
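+	// The flip must complete before the next PTE or metadata row is needed;
+	// min_row_time is the shortest such interval across the luma and (where
+	// present) chroma row heights.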
+ if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) {
+ if (GPUVMEnable == true && DCCEnable != true) {
+ min_row_time = dml_min(dpte_row_height * LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma);
+ } else if (GPUVMEnable != true && DCCEnable == true) {
+ min_row_time = dml_min(meta_row_height * LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma);
+ } else {
+ min_row_time = dml_min4(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio,
+ dpte_row_height_chroma * LineTime / VRatioChroma, meta_row_height_chroma * LineTime / VRatioChroma);
+ }
+ } else {
+ if (GPUVMEnable == true && DCCEnable != true) {
+ min_row_time = dpte_row_height * LineTime / VRatio;
+ } else if (GPUVMEnable != true && DCCEnable == true) {
+ min_row_time = meta_row_height * LineTime / VRatio;
+ } else {
+ min_row_time = dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio);
+ }
+ }
+
+ if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16
+ || TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) {
+ *ImmediateFlipSupportedForPipe = false;
+ } else {
+ *ImmediateFlipSupportedForPipe = true;
+ }
+}
+
+static double TruncToValidBPP(
+ double LinkBitRate,
+ int Lanes,
+ long HTotal,
+ long HActive,
+ double PixelClock,
+ double DesiredBPP,
+ bool DSCEnable,
+ enum output_encoder_class Output,
+ enum output_format_class Format,
+ unsigned int DSCInputBitPerComponent,
+ int DSCSlices,
+ int AudioRate,
+ int AudioLayout,
+ enum odm_combine_mode ODMCombine)
+{
+ double MaxLinkBPP = 0;
+ int MinDSCBPP = 0;
+ double MaxDSCBPP = 0;
+ int NonDSCBPP0 = 0;
+ int NonDSCBPP1 = 0;
+ int NonDSCBPP2 = 0;
+
+ if (Format == dm_420) {
+ NonDSCBPP0 = 12;
+ NonDSCBPP1 = 15;
+ NonDSCBPP2 = 18;
+ MinDSCBPP = 6;
+ MaxDSCBPP = 1.5 * DSCInputBitPerComponent - 1.0 / 16;
+ } else if (Format == dm_444) {
+ NonDSCBPP0 = 24;
+ NonDSCBPP1 = 30;
+ NonDSCBPP2 = 36;
+ MinDSCBPP = 8;
+ MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16;
+ } else {
+ if (Output == dm_hdmi) {
+ NonDSCBPP0 = 24;
+ NonDSCBPP1 = 24;
+ NonDSCBPP2 = 24;
+ }
+ else {
+ NonDSCBPP0 = 16;
+ NonDSCBPP1 = 20;
+ NonDSCBPP2 = 24;
+ }
+
+ if (Format == dm_n422) {
+ MinDSCBPP = 7;
+ MaxDSCBPP = 2 * DSCInputBitPerComponent - 1.0 / 16.0;
+ }
+ else {
+ MinDSCBPP = 8;
+ MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16.0;
+ }
+ }
+
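+	// Link capacity in bits per pixel: lane rate times 8b/10b coding efficiency
+	// (x8/10) times lane count, divided by the pixel clock.  With DSC over DP a
+	// further 2.4% is reserved, matching the FEC overhead.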
+ if (DSCEnable && Output == dm_dp) {
+ MaxLinkBPP = LinkBitRate / 10 * 8 * Lanes / PixelClock * (1 - 2.4 / 100);
+ } else {
+ MaxLinkBPP = LinkBitRate / 10 * 8 * Lanes / PixelClock;
+ }
+
+ if (ODMCombine == dm_odm_combine_mode_4to1 && MaxLinkBPP > 16) {
+ MaxLinkBPP = 16;
+ } else if (ODMCombine == dm_odm_combine_mode_2to1 && MaxLinkBPP > 32) {
+ MaxLinkBPP = 32;
+ }
+
+
+ if (DesiredBPP == 0) {
+ if (DSCEnable) {
+ if (MaxLinkBPP < MinDSCBPP) {
+ return BPP_INVALID;
+ } else if (MaxLinkBPP >= MaxDSCBPP) {
+ return MaxDSCBPP;
+ } else {
+ return dml_floor(16.0 * MaxLinkBPP, 1.0) / 16.0;
+ }
+ } else {
+ if (MaxLinkBPP >= NonDSCBPP2) {
+ return NonDSCBPP2;
+ } else if (MaxLinkBPP >= NonDSCBPP1) {
+ return NonDSCBPP1;
+ } else if (MaxLinkBPP >= NonDSCBPP0) {
+ return NonDSCBPP0;
+ } else {
+ return BPP_INVALID;
+ }
+ }
+ } else {
+ if (!((DSCEnable == false && (DesiredBPP == NonDSCBPP2 || DesiredBPP == NonDSCBPP1 || DesiredBPP == NonDSCBPP0)) ||
+ (DSCEnable && DesiredBPP >= MinDSCBPP && DesiredBPP <= MaxDSCBPP))) {
+ return BPP_INVALID;
+ } else {
+ return DesiredBPP;
+ }
+ }
+ return BPP_INVALID;
+}
+
+void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
+{
+ struct vba_vars_st *v = &mode_lib->vba;
+ int MinPrefetchMode = 0;
+ int MaxPrefetchMode = 2;
+ int i;
+ unsigned int j, k, m;
+ bool EnoughWritebackUnits = true;
+ bool WritebackModeSupport = true;
+ bool ViewportExceedsSurface = false;
+ double MaxTotalVActiveRDBandwidth = 0;
+ long ReorderingBytes = 0;
+ bool NotUrgentLatencyHiding[DC__NUM_DPP__MAX] = { 0 };
+
+ /*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
+
+ /*Scale Ratio, taps Support Check*/
+
+ v->ScaleRatioAndTapsSupport = true;
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ if (v->ScalerEnabled[k] == false
+ && ((v->SourcePixelFormat[k] != dm_444_64
+ && v->SourcePixelFormat[k] != dm_444_32
+ && v->SourcePixelFormat[k] != dm_444_16
+ && v->SourcePixelFormat[k] != dm_mono_16
+ && v->SourcePixelFormat[k] != dm_mono_8
+ && v->SourcePixelFormat[k] != dm_rgbe
+ && v->SourcePixelFormat[k] != dm_rgbe_alpha)
+ || v->HRatio[k] != 1.0
+ || v->htaps[k] != 1.0
+ || v->VRatio[k] != 1.0
+ || v->vtaps[k] != 1.0)) {
+ v->ScaleRatioAndTapsSupport = false;
+ } else if (v->vtaps[k] < 1.0 || v->vtaps[k] > 8.0
+ || v->htaps[k] < 1.0 || v->htaps[k] > 8.0
+ || (v->htaps[k] > 1.0
+ && (v->htaps[k] % 2) == 1)
+ || v->HRatio[k] > v->MaxHSCLRatio
+ || v->VRatio[k] > v->MaxVSCLRatio
+ || v->HRatio[k] > v->htaps[k]
+ || v->VRatio[k] > v->vtaps[k]
+ || (v->SourcePixelFormat[k] != dm_444_64
+ && v->SourcePixelFormat[k] != dm_444_32
+ && v->SourcePixelFormat[k] != dm_444_16
+ && v->SourcePixelFormat[k] != dm_mono_16
+ && v->SourcePixelFormat[k] != dm_mono_8
+ && v->SourcePixelFormat[k] != dm_rgbe
+ && (v->VTAPsChroma[k] < 1
+ || v->VTAPsChroma[k] > 8
+ || v->HTAPsChroma[k] < 1
+ || v->HTAPsChroma[k] > 8
+ || (v->HTAPsChroma[k] > 1 && v->HTAPsChroma[k] % 2 == 1)
+ || v->HRatioChroma[k] > v->MaxHSCLRatio
+ || v->VRatioChroma[k] > v->MaxVSCLRatio
+ || v->HRatioChroma[k] > v->HTAPsChroma[k]
+ || v->VRatioChroma[k] > v->VTAPsChroma[k]))) {
+ v->ScaleRatioAndTapsSupport = false;
+ }
+ }
+ /*Source Format, Pixel Format and Scan Support Check*/
+
+ v->SourceFormatPixelAndScanSupport = true;
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ if ((v->SurfaceTiling[k] == dm_sw_linear && (!(v->SourceScan[k] != dm_vert) || v->DCCEnable[k] == true))
+ || ((v->SurfaceTiling[k] == dm_sw_64kb_d || v->SurfaceTiling[k] == dm_sw_64kb_d_t || v->SurfaceTiling[k] == dm_sw_64kb_d_x)
+ && !(v->SourcePixelFormat[k] == dm_444_64))) {
+ v->SourceFormatPixelAndScanSupport = false;
+ }
+ }
+ /*Bandwidth Support Check*/
+
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ CalculateBytePerPixelAnd256BBlockSizes(
+ v->SourcePixelFormat[k],
+ v->SurfaceTiling[k],
+ &v->BytePerPixelY[k],
+ &v->BytePerPixelC[k],
+ &v->BytePerPixelInDETY[k],
+ &v->BytePerPixelInDETC[k],
+ &v->Read256BlockHeightY[k],
+ &v->Read256BlockHeightC[k],
+ &v->Read256BlockWidthY[k],
+ &v->Read256BlockWidthC[k]);
+ }
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ if (v->SourceScan[k] != dm_vert) {
+ v->SwathWidthYSingleDPP[k] = v->ViewportWidth[k];
+ v->SwathWidthCSingleDPP[k] = v->ViewportWidthChroma[k];
+ } else {
+ v->SwathWidthYSingleDPP[k] = v->ViewportHeight[k];
+ v->SwathWidthCSingleDPP[k] = v->ViewportHeightChroma[k];
+ }
+ }
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ v->ReadBandwidthLuma[k] = v->SwathWidthYSingleDPP[k] * dml_ceil(v->BytePerPixelInDETY[k], 1.0) / (v->HTotal[k] / v->PixelClock[k]) * v->VRatio[k];
+ v->ReadBandwidthChroma[k] = v->SwathWidthYSingleDPP[k] / 2 * dml_ceil(v->BytePerPixelInDETC[k], 2.0) / (v->HTotal[k] / v->PixelClock[k]) * v->VRatio[k] / 2.0;
+ }
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ if (v->WritebackEnable[k] == true
+ && v->WritebackPixelFormat[k] == dm_444_64) {
+ v->WriteBandwidth[k] = v->WritebackDestinationWidth[k]
+ * v->WritebackDestinationHeight[k]
+ / (v->WritebackSourceHeight[k]
+ * v->HTotal[k]
+ / v->PixelClock[k]) * 8.0;
+ } else if (v->WritebackEnable[k] == true) {
+ v->WriteBandwidth[k] = v->WritebackDestinationWidth[k]
+ * v->WritebackDestinationHeight[k]
+ / (v->WritebackSourceHeight[k]
+ * v->HTotal[k]
+ / v->PixelClock[k]) * 4.0;
+ } else {
+ v->WriteBandwidth[k] = 0.0;
+ }
+ }
+
+ /*Writeback Latency support check*/
+
+ v->WritebackLatencySupport = true;
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ if (v->WritebackEnable[k] == true) {
+ if (v->WritebackConfiguration == dm_whole_buffer_for_single_stream_no_interleave ||
+ v->WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave) {
+ if (v->WriteBandwidth[k]
+ > 2.0 * v->WritebackInterfaceBufferSize * 1024
+ / v->WritebackLatency) {
+ v->WritebackLatencySupport = false;
+ }
+ } else {
+ if (v->WriteBandwidth[k]
+ > v->WritebackInterfaceBufferSize * 1024
+ / v->WritebackLatency) {
+ v->WritebackLatencySupport = false;
+ }
+ }
+ }
+ }
+
+ /*Writeback Mode Support Check*/
+
+ v->TotalNumberOfActiveWriteback = 0;
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ if (v->WritebackEnable[k] == true) {
+ v->TotalNumberOfActiveWriteback =
+ v->TotalNumberOfActiveWriteback + 1;
+ }
+ }
+
+ if (v->TotalNumberOfActiveWriteback > v->MaxNumWriteback) {
+ EnoughWritebackUnits = false;
+ }
+ if (!v->WritebackSupportInterleaveAndUsingWholeBufferForASingleStream
+ && (v->WritebackConfiguration == dm_whole_buffer_for_single_stream_no_interleave
+ || v->WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave)) {
+
+ WritebackModeSupport = false;
+ }
+ if (v->WritebackConfiguration == dm_whole_buffer_for_single_stream_no_interleave && v->TotalNumberOfActiveWriteback > 1) {
+ WritebackModeSupport = false;
+ }
+
+ /*Writeback Scale Ratio and Taps Support Check*/
+
+ v->WritebackScaleRatioAndTapsSupport = true;
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ if (v->WritebackEnable[k] == true) {
+ if (v->WritebackHRatio[k] > v->WritebackMaxHSCLRatio
+ || v->WritebackVRatio[k]
+ > v->WritebackMaxVSCLRatio
+ || v->WritebackHRatio[k]
+ < v->WritebackMinHSCLRatio
+ || v->WritebackVRatio[k]
+ < v->WritebackMinVSCLRatio
+ || v->WritebackHTaps[k]
+ > v->WritebackMaxHSCLTaps
+ || v->WritebackVTaps[k]
+ > v->WritebackMaxVSCLTaps
+ || v->WritebackHRatio[k]
+ > v->WritebackHTaps[k]
+ || v->WritebackVRatio[k]
+ > v->WritebackVTaps[k]
+ || (v->WritebackHTaps[k] > 2.0
+ && ((v->WritebackHTaps[k] % 2)
+ == 1))) {
+ v->WritebackScaleRatioAndTapsSupport = false;
+ }
+ if (2.0 * v->WritebackDestinationWidth[k] * (v->WritebackVTaps[k] - 1) * 57 > v->WritebackLineBufferSize) {
+ v->WritebackScaleRatioAndTapsSupport = false;
+ }
+ }
+ }
+ /*Maximum DISPCLK/DPPCLK Support check*/
+
+ v->WritebackRequiredDISPCLK = 0.0;
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ if (v->WritebackEnable[k] == true) {
+ v->WritebackRequiredDISPCLK = dml_max(v->WritebackRequiredDISPCLK,
+ dml30_CalculateWriteBackDISPCLK(
+ v->WritebackPixelFormat[k],
+ v->PixelClock[k],
+ v->WritebackHRatio[k],
+ v->WritebackVRatio[k],
+ v->WritebackHTaps[k],
+ v->WritebackVTaps[k],
+ v->WritebackSourceWidth[k],
+ v->WritebackDestinationWidth[k],
+ v->HTotal[k],
+ v->WritebackLineBufferSize));
+ }
+ }
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ if (v->HRatio[k] > 1.0) {
+ v->PSCL_FACTOR[k] = dml_min(v->MaxDCHUBToPSCLThroughput, v->MaxPSCLToLBThroughput * v->HRatio[k] / dml_ceil(v->htaps[k] / 6.0, 1.0));
+ } else {
+ v->PSCL_FACTOR[k] = dml_min(v->MaxDCHUBToPSCLThroughput, v->MaxPSCLToLBThroughput);
+ }
+ if (v->BytePerPixelC[k] == 0.0) {
+ v->PSCL_FACTOR_CHROMA[k] = 0.0;
+ v->MinDPPCLKUsingSingleDPP[k] = v->PixelClock[k]
+ * dml_max3(v->vtaps[k] / 6.0 * dml_min(1.0, v->HRatio[k]), v->HRatio[k] * v->VRatio[k] / v->PSCL_FACTOR[k], 1.0);
+ if ((v->htaps[k] > 6.0 || v->vtaps[k] > 6.0) && v->MinDPPCLKUsingSingleDPP[k] < 2.0 * v->PixelClock[k]) {
+ v->MinDPPCLKUsingSingleDPP[k] = 2.0 * v->PixelClock[k];
+ }
+ } else {
+ if (v->HRatioChroma[k] > 1.0) {
+ v->PSCL_FACTOR_CHROMA[k] = dml_min(v->MaxDCHUBToPSCLThroughput,
+ v->MaxPSCLToLBThroughput * v->HRatioChroma[k] / dml_ceil(v->HTAPsChroma[k] / 6.0, 1.0));
+ } else {
+ v->PSCL_FACTOR_CHROMA[k] = dml_min(v->MaxDCHUBToPSCLThroughput, v->MaxPSCLToLBThroughput);
+ }
+ v->MinDPPCLKUsingSingleDPP[k] = v->PixelClock[k] * dml_max5(v->vtaps[k] / 6.0 * dml_min(1.0, v->HRatio[k]),
+ v->HRatio[k] * v->VRatio[k] / v->PSCL_FACTOR[k],
+ v->VTAPsChroma[k] / 6.0 * dml_min(1.0, v->HRatioChroma[k]),
+ v->HRatioChroma[k] * v->VRatioChroma[k] / v->PSCL_FACTOR_CHROMA[k],
+ 1.0);
+ if ((v->htaps[k] > 6.0 || v->vtaps[k] > 6.0 || v->HTAPsChroma[k] > 6.0 || v->VTAPsChroma[k] > 6.0)
+ && v->MinDPPCLKUsingSingleDPP[k] < 2.0 * v->PixelClock[k]) {
+ v->MinDPPCLKUsingSingleDPP[k] = 2.0 * v->PixelClock[k];
+ }
+ }
+ }
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ int MaximumSwathWidthSupportLuma = 0;
+ int MaximumSwathWidthSupportChroma = 0;
+
+ if (v->SurfaceTiling[k] == dm_sw_linear) {
+ MaximumSwathWidthSupportLuma = 8192.0;
+ } else if (v->SourceScan[k] == dm_vert && v->BytePerPixelC[k] > 0) {
+ MaximumSwathWidthSupportLuma = 2880.0;
+ } else {
+ MaximumSwathWidthSupportLuma = 5760.0;
+ }
+
+ if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_420_12) {
+ MaximumSwathWidthSupportChroma = MaximumSwathWidthSupportLuma / 2.0;
+ } else {
+ MaximumSwathWidthSupportChroma = MaximumSwathWidthSupportLuma;
+ }
+ v->MaximumSwathWidthInLineBufferLuma = v->LineBufferSize * dml_max(v->HRatio[k], 1.0) / v->LBBitPerPixel[k]
+ / (v->vtaps[k] + dml_max(dml_ceil(v->VRatio[k], 1.0) - 2, 0.0));
+ if (v->BytePerPixelC[k] == 0.0) {
+ v->MaximumSwathWidthInLineBufferChroma = 0;
+ } else {
+ v->MaximumSwathWidthInLineBufferChroma = v->LineBufferSize * dml_max(v->HRatioChroma[k], 1.0) / v->LBBitPerPixel[k]
+ / (v->VTAPsChroma[k] + dml_max(dml_ceil(v->VRatioChroma[k], 1.0) - 2, 0.0));
+ }
+ v->MaximumSwathWidthLuma[k] = dml_min(MaximumSwathWidthSupportLuma, v->MaximumSwathWidthInLineBufferLuma);
+ v->MaximumSwathWidthChroma[k] = dml_min(MaximumSwathWidthSupportChroma, v->MaximumSwathWidthInLineBufferChroma);
+ }
+
+ CalculateSwathAndDETConfiguration(
+ true,
+ v->NumberOfActivePlanes,
+ v->DETBufferSizeInKByte,
+ v->MaximumSwathWidthLuma,
+ v->MaximumSwathWidthChroma,
+ v->SourceScan,
+ v->SourcePixelFormat,
+ v->SurfaceTiling,
+ v->ViewportWidth,
+ v->ViewportHeight,
+ v->SurfaceWidthY,
+ v->SurfaceWidthC,
+ v->SurfaceHeightY,
+ v->SurfaceHeightC,
+ v->Read256BlockHeightY,
+ v->Read256BlockHeightC,
+ v->Read256BlockWidthY,
+ v->Read256BlockWidthC,
+ v->odm_combine_dummy,
+ v->BlendingAndTiming,
+ v->BytePerPixelY,
+ v->BytePerPixelC,
+ v->BytePerPixelInDETY,
+ v->BytePerPixelInDETC,
+ v->HActive,
+ v->HRatio,
+ v->HRatioChroma,
+ v->DPPPerPlane,
+ v->swath_width_luma_ub,
+ v->swath_width_chroma_ub,
+ v->SwathWidthY,
+ v->SwathWidthC,
+ v->SwathHeightY,
+ v->SwathHeightC,
+ v->DETBufferSizeY,
+ v->DETBufferSizeC,
+ v->SingleDPPViewportSizeSupportPerPlane,
+ &v->ViewportSizeSupport[0][0]);
+
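+	// For each voltage state i and each of the two DPP configurations j, choose an
+	// ODM combine mode per plane (disabled, 2:1 or 4:1) from the ODM policy and from
+	// whether a single pipe's required DISPCLK exceeds the state's rounded-down
+	// maximum; HActive above 4096 with 4:2:0 output forces 2:1 combine as a
+	// workaround.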
+ for (i = 0; i < v->soc.num_states; i++) {
+ for (j = 0; j < 2; j++) {
+ v->MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(v->MaxDispclk[i], v->DISPCLKDPPCLKVCOSpeed);
+ v->MaxDppclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(v->MaxDppclk[i], v->DISPCLKDPPCLKVCOSpeed);
+ v->RequiredDISPCLK[i][j] = 0.0;
+ v->DISPCLK_DPPCLK_Support[i][j] = true;
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ v->PlaneRequiredDISPCLKWithoutODMCombine = v->PixelClock[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
+ * (1.0 + v->DISPCLKRampingMargin / 100.0);
+ if ((v->PlaneRequiredDISPCLKWithoutODMCombine >= v->MaxDispclk[i] && v->MaxDispclk[i] == v->MaxDispclk[mode_lib->soc.num_states]
+ && v->MaxDppclk[i] == v->MaxDppclk[mode_lib->soc.num_states])) {
+ v->PlaneRequiredDISPCLKWithoutODMCombine = v->PixelClock[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+ }
+ v->PlaneRequiredDISPCLKWithODMCombine2To1 = v->PixelClock[k] / 2 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
+ * (1 + v->DISPCLKRampingMargin / 100.0);
+ if ((v->PlaneRequiredDISPCLKWithODMCombine2To1 >= v->MaxDispclk[i] && v->MaxDispclk[i] == v->MaxDispclk[mode_lib->soc.num_states]
+ && v->MaxDppclk[i] == v->MaxDppclk[mode_lib->soc.num_states])) {
+ v->PlaneRequiredDISPCLKWithODMCombine2To1 = v->PixelClock[k] / 2 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+ }
+ v->PlaneRequiredDISPCLKWithODMCombine4To1 = v->PixelClock[k] / 4 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
+ * (1 + v->DISPCLKRampingMargin / 100.0);
+ if ((v->PlaneRequiredDISPCLKWithODMCombine4To1 >= v->MaxDispclk[i] && v->MaxDispclk[i] == v->MaxDispclk[mode_lib->soc.num_states]
+ && v->MaxDppclk[i] == v->MaxDppclk[mode_lib->soc.num_states])) {
+ v->PlaneRequiredDISPCLKWithODMCombine4To1 = v->PixelClock[k] / 4 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+ }
+
+ if (v->ODMCombinePolicy == dm_odm_combine_policy_none) {
+ v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
+ v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithoutODMCombine;
+ } else if (v->ODMCombinePolicy == dm_odm_combine_policy_2to1) {
+ v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+ v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
+ } else if (v->ODMCombinePolicy == dm_odm_combine_policy_4to1
+ || v->PlaneRequiredDISPCLKWithODMCombine2To1 > v->MaxDispclkRoundedDownToDFSGranularity) {
+ v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1;
+ v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1;
+ } else if (v->PlaneRequiredDISPCLKWithoutODMCombine > v->MaxDispclkRoundedDownToDFSGranularity) {
+ v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+ v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
+ } else {
+ v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
+ v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithoutODMCombine;
+ /*420 format workaround*/
+ if (v->HActive[k] > 4096 && v->OutputFormat[k] == dm_420) {
+ v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+ v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
+ }
+ }
+
+ if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1) {
+ v->MPCCombine[i][j][k] = false;
+ v->NoOfDPP[i][j][k] = 4;
+ v->RequiredDPPCLK[i][j][k] = v->MinDPPCLKUsingSingleDPP[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 4;
+ } else if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
+ v->MPCCombine[i][j][k] = false;
+ v->NoOfDPP[i][j][k] = 2;
+ v->RequiredDPPCLK[i][j][k] = v->MinDPPCLKUsingSingleDPP[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2;
+ } else if ((v->WhenToDoMPCCombine == dm_mpc_never
+ || (v->MinDPPCLKUsingSingleDPP[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= v->MaxDppclkRoundedDownToDFSGranularity
+ && v->SingleDPPViewportSizeSupportPerPlane[k] == true))) {
+ v->MPCCombine[i][j][k] = false;
+ v->NoOfDPP[i][j][k] = 1;
+ v->RequiredDPPCLK[i][j][k] = v->MinDPPCLKUsingSingleDPP[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+ } else {
+ v->MPCCombine[i][j][k] = true;
+ v->NoOfDPP[i][j][k] = 2;
+ v->RequiredDPPCLK[i][j][k] = v->MinDPPCLKUsingSingleDPP[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2.0;
+ }
+ v->RequiredDISPCLK[i][j] = dml_max(v->RequiredDISPCLK[i][j], v->PlaneRequiredDISPCLK);
+ if ((v->MinDPPCLKUsingSingleDPP[k] / v->NoOfDPP[i][j][k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
+ > v->MaxDppclkRoundedDownToDFSGranularity) || (v->PlaneRequiredDISPCLK > v->MaxDispclkRoundedDownToDFSGranularity)) {
+ v->DISPCLK_DPPCLK_Support[i][j] = false;
+ }
+ }
+ v->TotalNumberOfActiveDPP[i][j] = 0;
+ v->TotalNumberOfSingleDPPPlanes[i][j] = 0;
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ v->TotalNumberOfActiveDPP[i][j] = v->TotalNumberOfActiveDPP[i][j] + v->NoOfDPP[i][j][k];
+ if (v->NoOfDPP[i][j][k] == 1)
+ v->TotalNumberOfSingleDPPPlanes[i][j] = v->TotalNumberOfSingleDPPPlanes[i][j] + 1;
+ }
+ if (j == 1 && v->WhenToDoMPCCombine != dm_mpc_never) {
+ while (!(v->TotalNumberOfActiveDPP[i][j] >= v->MaxNumDPP || v->TotalNumberOfSingleDPPPlanes[i][j] == 0)) {
+ double BWOfNonSplitPlaneOfMaximumBandwidth = 0;
+ unsigned int NumberOfNonSplitPlaneOfMaximumBandwidth = 0;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k] > BWOfNonSplitPlaneOfMaximumBandwidth
+ && v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled && v->MPCCombine[i][j][k] == false) {
+ BWOfNonSplitPlaneOfMaximumBandwidth = v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k];
+ NumberOfNonSplitPlaneOfMaximumBandwidth = k;
+ }
+ }
+ v->MPCCombine[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] = true;
+ v->NoOfDPP[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] = 2;
+ v->RequiredDPPCLK[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] = v->MinDPPCLKUsingSingleDPP[NumberOfNonSplitPlaneOfMaximumBandwidth]
+ * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100) / 2;
+ v->TotalNumberOfActiveDPP[i][j] = v->TotalNumberOfActiveDPP[i][j] + 1;
+ v->TotalNumberOfSingleDPPPlanes[i][j] = v->TotalNumberOfSingleDPPPlanes[i][j] - 1;
+ }
+ }
+ if (v->TotalNumberOfActiveDPP[i][j] > v->MaxNumDPP) {
+ v->RequiredDISPCLK[i][j] = 0.0;
+ v->DISPCLK_DPPCLK_Support[i][j] = true;
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
+ if (v->SingleDPPViewportSizeSupportPerPlane[k] == false && v->WhenToDoMPCCombine != dm_mpc_never) {
+ v->MPCCombine[i][j][k] = true;
+ v->NoOfDPP[i][j][k] = 2;
+ v->RequiredDPPCLK[i][j][k] = v->MinDPPCLKUsingSingleDPP[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2.0;
+ } else {
+ v->MPCCombine[i][j][k] = false;
+ v->NoOfDPP[i][j][k] = 1;
+ v->RequiredDPPCLK[i][j][k] = v->MinDPPCLKUsingSingleDPP[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+ }
+ if (!(v->MaxDispclk[i] == v->MaxDispclk[v->soc.num_states - 1] && v->MaxDppclk[i] == v->MaxDppclk[v->soc.num_states - 1])) {
+ v->PlaneRequiredDISPCLK = v->PixelClock[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
+ * (1.0 + v->DISPCLKRampingMargin / 100.0);
+ } else {
+ v->PlaneRequiredDISPCLK = v->PixelClock[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+ }
+ v->RequiredDISPCLK[i][j] = dml_max(v->RequiredDISPCLK[i][j], v->PlaneRequiredDISPCLK);
+ if ((v->MinDPPCLKUsingSingleDPP[k] / v->NoOfDPP[i][j][k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
+ > v->MaxDppclkRoundedDownToDFSGranularity) || (v->PlaneRequiredDISPCLK > v->MaxDispclkRoundedDownToDFSGranularity)) {
+ v->DISPCLK_DPPCLK_Support[i][j] = false;
+ }
+ }
+ v->TotalNumberOfActiveDPP[i][j] = 0.0;
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ v->TotalNumberOfActiveDPP[i][j] = v->TotalNumberOfActiveDPP[i][j] + v->NoOfDPP[i][j][k];
+ }
+ }
+ v->RequiredDISPCLK[i][j] = dml_max(v->RequiredDISPCLK[i][j], v->WritebackRequiredDISPCLK);
+ if (v->MaxDispclkRoundedDownToDFSGranularity < v->WritebackRequiredDISPCLK) {
+ v->DISPCLK_DPPCLK_Support[i][j] = false;
+ }
+ }
+ }
+
+ /*Total Available Pipes Support Check*/
+
+ for (i = 0; i < v->soc.num_states; i++) {
+ for (j = 0; j < 2; j++) {
+ if (v->TotalNumberOfActiveDPP[i][j] <= v->MaxNumDPP) {
+ v->TotalAvailablePipesSupport[i][j] = true;
+ } else {
+ v->TotalAvailablePipesSupport[i][j] = false;
+ }
+ }
+ }
+ /*Display IO and DSC Support Check*/
+
+ v->NonsupportedDSCInputBPC = false;
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ if (!(v->DSCInputBitPerComponent[k] == 12.0
+ || v->DSCInputBitPerComponent[k] == 10.0
+ || v->DSCInputBitPerComponent[k] == 8.0)) {
+ v->NonsupportedDSCInputBPC = true;
+ }
+ }
+
+ /*Number Of DSC Slices*/
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->BlendingAndTiming[k] == k) {
+ if (v->PixelClockBackEnd[k] > 3200) {
+ v->NumberOfDSCSlices[k] = dml_ceil(v->PixelClockBackEnd[k] / 400.0, 4.0);
+ } else if (v->PixelClockBackEnd[k] > 1360) {
+ v->NumberOfDSCSlices[k] = 8;
+ } else if (v->PixelClockBackEnd[k] > 680) {
+ v->NumberOfDSCSlices[k] = 4;
+ } else if (v->PixelClockBackEnd[k] > 340) {
+ v->NumberOfDSCSlices[k] = 2;
+ } else {
+ v->NumberOfDSCSlices[k] = 1;
+ }
+ } else {
+ v->NumberOfDSCSlices[k] = 0;
+ }
+ }
+
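+ /*Output link bandwidth (bpp) per state, enabling DSC/FEC where required*/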
+ for (i = 0; i < v->soc.num_states; i++) {
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ v->RequiresDSC[i][k] = false;
+ v->RequiresFEC[i][k] = false;
+ if (v->BlendingAndTiming[k] == k) {
+ if (v->Output[k] == dm_hdmi) {
+ v->RequiresDSC[i][k] = false;
+ v->RequiresFEC[i][k] = false;
+ v->OutputBppPerState[i][k] = TruncToValidBPP(
+ dml_min(600.0, v->PHYCLKPerState[i]) * 10,
+ 3,
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ false,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ } else if (v->Output[k] == dm_dp || v->Output[k] == dm_edp) {
+ if (v->DSCEnable[k] == true) {
+ v->RequiresDSC[i][k] = true;
+ v->LinkDSCEnable = true;
+ if (v->Output[k] == dm_dp) {
+ v->RequiresFEC[i][k] = true;
+ } else {
+ v->RequiresFEC[i][k] = false;
+ }
+ } else {
+ v->RequiresDSC[i][k] = false;
+ v->LinkDSCEnable = false;
+ v->RequiresFEC[i][k] = false;
+ }
+
+ v->Outbpp = BPP_INVALID;
+ if (v->PHYCLKPerState[i] >= 270.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 2700,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR"
+ }
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 540.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 5400,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR2"
+ }
+ if (v->Outbpp == BPP_INVALID && v->PHYCLKPerState[i] >= 810.0) {
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 8100,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ if (v->Outbpp == BPP_INVALID && v->ForcedOutputLinkBPP[k] == 0) {
+ //if (v->Outbpp == BPP_INVALID && v->DSCEnabled[k] == dm_dsc_enable_only_if_necessary && v->ForcedOutputLinkBPP[k] == 0) {
+ v->RequiresDSC[i][k] = true;
+ v->LinkDSCEnable = true;
+ if (v->Output[k] == dm_dp) {
+ v->RequiresFEC[i][k] = true;
+ }
+ v->Outbpp = TruncToValidBPP(
+ (1.0 - v->Downspreading / 100.0) * 8100,
+ v->OutputLinkDPLanes[k],
+ v->HTotal[k],
+ v->HActive[k],
+ v->PixelClockBackEnd[k],
+ v->ForcedOutputLinkBPP[k],
+ v->LinkDSCEnable,
+ v->Output[k],
+ v->OutputFormat[k],
+ v->DSCInputBitPerComponent[k],
+ v->NumberOfDSCSlices[k],
+ v->AudioSampleRate[k],
+ v->AudioSampleLayout[k],
+ v->ODMCombineEnablePerState[i][k]);
+ }
+ v->OutputBppPerState[i][k] = v->Outbpp;
+ // TODO: Need some other way to handle this nonsense
+ // v->OutputTypeAndRatePerState[i][k] = v->Output[k] & " HBR3"
+ }
+ }
+ } else {
+ v->OutputBppPerState[i][k] = 0;
+ }
+ }
+ }
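+ /*Display IO support per state*/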
+ for (i = 0; i < v->soc.num_states; i++) {
+ v->DIOSupport[i] = true;
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ if (v->BlendingAndTiming[k] == k && (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_hdmi)
+ && (v->OutputBppPerState[i][k] == 0
+ || (v->OutputFormat[k] == dm_420 && v->Interlace[k] == true && v->ProgressiveToInterlaceUnitInOPP == true))) {
+ v->DIOSupport[i] = false;
+ }
+ }
+ }
+
+ for (i = 0; i < v->soc.num_states; ++i) {
+ v->ODMCombine4To1SupportCheckOK[i] = true;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->BlendingAndTiming[k] == k && v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1
+ && (v->ODMCombine4To1Supported == false || v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_hdmi)) {
+ v->ODMCombine4To1SupportCheckOK[i] = false;
+ }
+ }
+ }
+
+ for (i = 0; i < v->soc.num_states; i++) {
+ v->DSCCLKRequiredMoreThanSupported[i] = false;
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ if (v->BlendingAndTiming[k] == k) {
+ if (v->Output[k] == dm_dp || v->Output[k] == dm_edp) {
+ if (v->OutputFormat[k] == dm_420) {
+ v->DSCFormatFactor = 2;
+ } else if (v->OutputFormat[k] == dm_444) {
+ v->DSCFormatFactor = 1;
+ } else if (v->OutputFormat[k] == dm_n422) {
+ v->DSCFormatFactor = 2;
+ } else {
+ v->DSCFormatFactor = 1;
+ }
+ if (v->RequiresDSC[i][k] == true) {
+ if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1) {
+ if (v->PixelClockBackEnd[k] / 12.0 / v->DSCFormatFactor
+ > (1.0 - v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * v->MaxDSCCLK[i]) {
+ v->DSCCLKRequiredMoreThanSupported[i] = true;
+ }
+ } else if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
+ if (v->PixelClockBackEnd[k] / 6.0 / v->DSCFormatFactor
+ > (1.0 - v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * v->MaxDSCCLK[i]) {
+ v->DSCCLKRequiredMoreThanSupported[i] = true;
+ }
+ } else {
+ if (v->PixelClockBackEnd[k] / 3.0 / v->DSCFormatFactor
+ > (1.0 - v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * v->MaxDSCCLK[i]) {
+ v->DSCCLKRequiredMoreThanSupported[i] = true;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
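+ /*Check that enough DSC units are available for the required streams*/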
+ for (i = 0; i < v->soc.num_states; i++) {
+ v->NotEnoughDSCUnits[i] = false;
+ v->TotalDSCUnitsRequired = 0.0;
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ if (v->RequiresDSC[i][k] == true) {
+ if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1) {
+ v->TotalDSCUnitsRequired = v->TotalDSCUnitsRequired + 4.0;
+ } else if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
+ v->TotalDSCUnitsRequired = v->TotalDSCUnitsRequired + 2.0;
+ } else {
+ v->TotalDSCUnitsRequired = v->TotalDSCUnitsRequired + 1.0;
+ }
+ }
+ }
+ if (v->TotalDSCUnitsRequired > v->NumberOfDSC) {
+ v->NotEnoughDSCUnits[i] = true;
+ }
+ }
+ /*DSC Delay per state*/
+
+ for (i = 0; i < v->soc.num_states; i++) {
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ if (v->OutputBppPerState[i][k] == BPP_INVALID) {
+ v->BPP = 0.0;
+ } else {
+ v->BPP = v->OutputBppPerState[i][k];
+ }
+ if (v->RequiresDSC[i][k] == true && v->BPP != 0.0) {
+ if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
+ v->DSCDelayPerState[i][k] = dscceComputeDelay(
+ v->DSCInputBitPerComponent[k],
+ v->BPP,
+ dml_ceil(1.0 * v->HActive[k] / v->NumberOfDSCSlices[k], 1.0),
+ v->NumberOfDSCSlices[k],
+ v->OutputFormat[k],
+ v->Output[k]) + dscComputeDelay(v->OutputFormat[k], v->Output[k]);
+ } else if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
+ v->DSCDelayPerState[i][k] = 2.0
+ * dscceComputeDelay(
+ v->DSCInputBitPerComponent[k],
+ v->BPP,
+ dml_ceil(1.0 * v->HActive[k] / v->NumberOfDSCSlices[k], 1.0),
+ v->NumberOfDSCSlices[k] / 2,
+ v->OutputFormat[k],
+ v->Output[k]) + dscComputeDelay(v->OutputFormat[k], v->Output[k]);
+ } else {
+ v->DSCDelayPerState[i][k] = 4.0
+ * (dscceComputeDelay(
+ v->DSCInputBitPerComponent[k],
+ v->BPP,
+ dml_ceil(1.0 * v->HActive[k] / v->NumberOfDSCSlices[k], 1.0),
+ v->NumberOfDSCSlices[k] / 4,
+ v->OutputFormat[k],
+ v->Output[k]) + dscComputeDelay(v->OutputFormat[k], v->Output[k]));
+ }
+ v->DSCDelayPerState[i][k] = v->DSCDelayPerState[i][k] * v->PixelClock[k] / v->PixelClockBackEnd[k];
+ } else {
+ v->DSCDelayPerState[i][k] = 0.0;
+ }
+ }
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ for (m = 0; m <= v->NumberOfActivePlanes - 1; m++) {
+ if (v->BlendingAndTiming[k] == m && v->RequiresDSC[i][m] == true) {
+ v->DSCDelayPerState[i][k] = v->DSCDelayPerState[i][m];
+ }
+ }
+ }
+ }
+
+ //Calculate Swath, DET Configuration, DCFCLKDeepSleep
+ //
+ for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (j = 0; j <= 1; ++j) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ v->RequiredDPPCLKThisState[k] = v->RequiredDPPCLK[i][j][k];
+ v->NoOfDPPThisState[k] = v->NoOfDPP[i][j][k];
+ v->ODMCombineEnableThisState[k] = v->ODMCombineEnablePerState[i][k];
+ }
+
+ CalculateSwathAndDETConfiguration(
+ false,
+ v->NumberOfActivePlanes,
+ v->DETBufferSizeInKByte,
+ v->MaximumSwathWidthLuma,
+ v->MaximumSwathWidthChroma,
+ v->SourceScan,
+ v->SourcePixelFormat,
+ v->SurfaceTiling,
+ v->ViewportWidth,
+ v->ViewportHeight,
+ v->SurfaceWidthY,
+ v->SurfaceWidthC,
+ v->SurfaceHeightY,
+ v->SurfaceHeightC,
+ v->Read256BlockHeightY,
+ v->Read256BlockHeightC,
+ v->Read256BlockWidthY,
+ v->Read256BlockWidthC,
+ v->ODMCombineEnableThisState,
+ v->BlendingAndTiming,
+ v->BytePerPixelY,
+ v->BytePerPixelC,
+ v->BytePerPixelInDETY,
+ v->BytePerPixelInDETC,
+ v->HActive,
+ v->HRatio,
+ v->HRatioChroma,
+ v->NoOfDPPThisState,
+ v->swath_width_luma_ub_this_state,
+ v->swath_width_chroma_ub_this_state,
+ v->SwathWidthYThisState,
+ v->SwathWidthCThisState,
+ v->SwathHeightYThisState,
+ v->SwathHeightCThisState,
+ v->DETBufferSizeYThisState,
+ v->DETBufferSizeCThisState,
+ v->dummystring,
+ &v->ViewportSizeSupport[i][j]);
+
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ v->swath_width_luma_ub_all_states[i][j][k] = v->swath_width_luma_ub_this_state[k];
+ v->swath_width_chroma_ub_all_states[i][j][k] = v->swath_width_chroma_ub_this_state[k];
+ v->SwathWidthYAllStates[i][j][k] = v->SwathWidthYThisState[k];
+ v->SwathWidthCAllStates[i][j][k] = v->SwathWidthCThisState[k];
+ v->SwathHeightYAllStates[i][j][k] = v->SwathHeightYThisState[k];
+ v->SwathHeightCAllStates[i][j][k] = v->SwathHeightCThisState[k];
+ v->DETBufferSizeYAllStates[i][j][k] = v->DETBufferSizeYThisState[k];
+ v->DETBufferSizeCAllStates[i][j][k] = v->DETBufferSizeCThisState[k];
+ }
+
+ }
+ }
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ v->cursor_bw[k] = v->NumberOfCursors[k] * v->CursorWidth[k][0] * v->CursorBPP[k][0] / 8.0 / (v->HTotal[k] / v->PixelClock[k]) * v->VRatio[k];
+ }
+
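+ /*Per-state VM and row bytes, prefetch source lines, urgent burst factors and DCFCLKDeepSleep*/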
+ for (i = 0; i < v->soc.num_states; i++) {
+ for (j = 0; j < 2; j++) {
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ v->swath_width_luma_ub_this_state[k] = v->swath_width_luma_ub_all_states[i][j][k];
+ v->swath_width_chroma_ub_this_state[k] = v->swath_width_chroma_ub_all_states[i][j][k];
+ v->SwathWidthYThisState[k] = v->SwathWidthYAllStates[i][j][k];
+ v->SwathWidthCThisState[k] = v->SwathWidthCAllStates[i][j][k];
+ v->SwathHeightYThisState[k] = v->SwathHeightYAllStates[i][j][k];
+ v->SwathHeightCThisState[k] = v->SwathHeightCAllStates[i][j][k];
+ v->DETBufferSizeYThisState[k] = v->DETBufferSizeYAllStates[i][j][k];
+ v->DETBufferSizeCThisState[k] = v->DETBufferSizeCAllStates[i][j][k];
+ }
+
+ v->TotalNumberOfDCCActiveDPP[i][j] = 0;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->DCCEnable[k] == true) {
+ v->TotalNumberOfDCCActiveDPP[i][j] = v->TotalNumberOfDCCActiveDPP[i][j] + v->NoOfDPP[i][j][k];
+ }
+ }
+
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_420_12
+ || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
+
+ if ((v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_420_12) && v->SourceScan[k] != dm_vert) {
+ v->PTEBufferSizeInRequestsForLuma = (v->PTEBufferSizeInRequestsLuma + v->PTEBufferSizeInRequestsChroma) / 2;
+ v->PTEBufferSizeInRequestsForChroma = v->PTEBufferSizeInRequestsForLuma;
+ } else {
+ v->PTEBufferSizeInRequestsForLuma = v->PTEBufferSizeInRequestsLuma;
+ v->PTEBufferSizeInRequestsForChroma = v->PTEBufferSizeInRequestsChroma;
+ }
+
+ v->PDEAndMetaPTEBytesPerFrameC = CalculateVMAndRowBytes(
+ mode_lib,
+ v->DCCEnable[k],
+ v->Read256BlockHeightC[k],
+ v->Read256BlockWidthC[k],
+ v->SourcePixelFormat[k],
+ v->SurfaceTiling[k],
+ v->BytePerPixelC[k],
+ v->SourceScan[k],
+ v->SwathWidthCThisState[k],
+ v->ViewportHeightChroma[k],
+ v->GPUVMEnable,
+ v->HostVMEnable,
+ v->HostVMMaxNonCachedPageTableLevels,
+ v->GPUVMMinPageSize,
+ v->HostVMMinPageSize,
+ v->PTEBufferSizeInRequestsForChroma,
+ v->PitchC[k],
+ 0.0,
+ &v->MacroTileWidthC[k],
+ &v->MetaRowBytesC,
+ &v->DPTEBytesPerRowC,
+ &v->PTEBufferSizeNotExceededC[i][j][k],
+ &v->dummyinteger7,
+ &v->dpte_row_height_chroma[k],
+ &v->dummyinteger28,
+ &v->dummyinteger26,
+ &v->dummyinteger23,
+ &v->meta_row_height_chroma[k],
+ &v->dummyinteger8,
+ &v->dummyinteger9,
+ &v->dummyinteger19,
+ &v->dummyinteger20,
+ &v->dummyinteger17,
+ &v->dummyinteger10,
+ &v->dummyinteger11);
+
+ v->PrefetchLinesC[i][j][k] = CalculatePrefetchSourceLines(
+ mode_lib,
+ v->VRatioChroma[k],
+ v->VTAPsChroma[k],
+ v->Interlace[k],
+ v->ProgressiveToInterlaceUnitInOPP,
+ v->SwathHeightCThisState[k],
+ v->ViewportYStartC[k],
+ &v->PrefillC[k],
+ &v->MaxNumSwC[k]);
+ } else {
+ v->PTEBufferSizeInRequestsForLuma = v->PTEBufferSizeInRequestsLuma + v->PTEBufferSizeInRequestsChroma;
+ v->PTEBufferSizeInRequestsForChroma = 0;
+ v->PDEAndMetaPTEBytesPerFrameC = 0.0;
+ v->MetaRowBytesC = 0.0;
+ v->DPTEBytesPerRowC = 0.0;
+ v->PrefetchLinesC[i][j][k] = 0.0;
+ v->PTEBufferSizeNotExceededC[i][j][k] = true;
+ }
+ v->PDEAndMetaPTEBytesPerFrameY = CalculateVMAndRowBytes(
+ mode_lib,
+ v->DCCEnable[k],
+ v->Read256BlockHeightY[k],
+ v->Read256BlockWidthY[k],
+ v->SourcePixelFormat[k],
+ v->SurfaceTiling[k],
+ v->BytePerPixelY[k],
+ v->SourceScan[k],
+ v->SwathWidthYThisState[k],
+ v->ViewportHeight[k],
+ v->GPUVMEnable,
+ v->HostVMEnable,
+ v->HostVMMaxNonCachedPageTableLevels,
+ v->GPUVMMinPageSize,
+ v->HostVMMinPageSize,
+ v->PTEBufferSizeInRequestsForLuma,
+ v->PitchY[k],
+ v->DCCMetaPitchY[k],
+ &v->MacroTileWidthY[k],
+ &v->MetaRowBytesY,
+ &v->DPTEBytesPerRowY,
+ &v->PTEBufferSizeNotExceededY[i][j][k],
+ v->dummyinteger4,
+ &v->dpte_row_height[k],
+ &v->dummyinteger29,
+ &v->dummyinteger27,
+ &v->dummyinteger24,
+ &v->meta_row_height[k],
+ &v->dummyinteger25,
+ &v->dpte_group_bytes[k],
+ &v->dummyinteger21,
+ &v->dummyinteger22,
+ &v->dummyinteger18,
+ &v->dummyinteger5,
+ &v->dummyinteger6);
+ v->PrefetchLinesY[i][j][k] = CalculatePrefetchSourceLines(
+ mode_lib,
+ v->VRatio[k],
+ v->vtaps[k],
+ v->Interlace[k],
+ v->ProgressiveToInterlaceUnitInOPP,
+ v->SwathHeightYThisState[k],
+ v->ViewportYStartY[k],
+ &v->PrefillY[k],
+ &v->MaxNumSwY[k]);
+ v->PDEAndMetaPTEBytesPerFrame[i][j][k] = v->PDEAndMetaPTEBytesPerFrameY + v->PDEAndMetaPTEBytesPerFrameC;
+ v->MetaRowBytes[i][j][k] = v->MetaRowBytesY + v->MetaRowBytesC;
+ v->DPTEBytesPerRow[i][j][k] = v->DPTEBytesPerRowY + v->DPTEBytesPerRowC;
+
+ CalculateRowBandwidth(
+ v->GPUVMEnable,
+ v->SourcePixelFormat[k],
+ v->VRatio[k],
+ v->VRatioChroma[k],
+ v->DCCEnable[k],
+ v->HTotal[k] / v->PixelClock[k],
+ v->MetaRowBytesY,
+ v->MetaRowBytesC,
+ v->meta_row_height[k],
+ v->meta_row_height_chroma[k],
+ v->DPTEBytesPerRowY,
+ v->DPTEBytesPerRowC,
+ v->dpte_row_height[k],
+ v->dpte_row_height_chroma[k],
+ &v->meta_row_bandwidth[i][j][k],
+ &v->dpte_row_bandwidth[i][j][k]);
+ }
+ v->UrgLatency[i] = CalculateUrgentLatency(
+ v->UrgentLatencyPixelDataOnly,
+ v->UrgentLatencyPixelMixedWithVMData,
+ v->UrgentLatencyVMDataOnly,
+ v->DoUrgentLatencyAdjustment,
+ v->UrgentLatencyAdjustmentFabricClockComponent,
+ v->UrgentLatencyAdjustmentFabricClockReference,
+ v->FabricClockPerState[i]);
+
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ CalculateUrgentBurstFactor(
+ v->swath_width_luma_ub_this_state[k],
+ v->swath_width_chroma_ub_this_state[k],
+ v->DETBufferSizeInKByte,
+ v->SwathHeightYThisState[k],
+ v->SwathHeightCThisState[k],
+ v->HTotal[k] / v->PixelClock[k],
+ v->UrgLatency[i],
+ v->CursorBufferSize,
+ v->CursorWidth[k][0],
+ v->CursorBPP[k][0],
+ v->VRatio[k],
+ v->VRatioChroma[k],
+ v->BytePerPixelInDETY[k],
+ v->BytePerPixelInDETC[k],
+ v->DETBufferSizeYThisState[k],
+ v->DETBufferSizeCThisState[k],
+ &v->UrgentBurstFactorCursor[k],
+ &v->UrgentBurstFactorLuma[k],
+ &v->UrgentBurstFactorChroma[k],
+ &NotUrgentLatencyHiding[k]);
+ }
+
+ v->NotUrgentLatencyHiding[i][j] = false;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (NotUrgentLatencyHiding[k]) {
+ v->NotUrgentLatencyHiding[i][j] = true;
+ }
+ }
+
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ v->VActivePixelBandwidth[i][j][k] = v->ReadBandwidthLuma[k] * v->UrgentBurstFactorLuma[k]
+ + v->ReadBandwidthChroma[k] * v->UrgentBurstFactorChroma[k];
+ v->VActiveCursorBandwidth[i][j][k] = v->cursor_bw[k] * v->UrgentBurstFactorCursor[k];
+ }
+
+ v->TotalVActivePixelBandwidth[i][j] = 0;
+ v->TotalVActiveCursorBandwidth[i][j] = 0;
+ v->TotalMetaRowBandwidth[i][j] = 0;
+ v->TotalDPTERowBandwidth[i][j] = 0;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ v->TotalVActivePixelBandwidth[i][j] = v->TotalVActivePixelBandwidth[i][j] + v->VActivePixelBandwidth[i][j][k];
+ v->TotalVActiveCursorBandwidth[i][j] = v->TotalVActiveCursorBandwidth[i][j] + v->VActiveCursorBandwidth[i][j][k];
+ v->TotalMetaRowBandwidth[i][j] = v->TotalMetaRowBandwidth[i][j] + v->NoOfDPP[i][j][k] * v->meta_row_bandwidth[i][j][k];
+ v->TotalDPTERowBandwidth[i][j] = v->TotalDPTERowBandwidth[i][j] + v->NoOfDPP[i][j][k] * v->dpte_row_bandwidth[i][j][k];
+ }
+
+ CalculateDCFCLKDeepSleep(
+ mode_lib,
+ v->NumberOfActivePlanes,
+ v->BytePerPixelY,
+ v->BytePerPixelC,
+ v->VRatio,
+ v->VRatioChroma,
+ v->SwathWidthYThisState,
+ v->SwathWidthCThisState,
+ v->NoOfDPPThisState,
+ v->HRatio,
+ v->HRatioChroma,
+ v->PixelClock,
+ v->PSCL_FACTOR,
+ v->PSCL_FACTOR_CHROMA,
+ v->RequiredDPPCLKThisState,
+ v->ReadBandwidthLuma,
+ v->ReadBandwidthChroma,
+ v->ReturnBusWidth,
+ &v->ProjectedDCFCLKDeepSleep[i][j]);
+ }
+ }
+
+ //Calculate Return BW
+
+ for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (j = 0; j <= 1; ++j) {
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ if (v->BlendingAndTiming[k] == k) {
+ if (v->WritebackEnable[k] == true) {
+ v->WritebackDelayTime[k] = v->WritebackLatency
+ + CalculateWriteBackDelay(
+ v->WritebackPixelFormat[k],
+ v->WritebackHRatio[k],
+ v->WritebackVRatio[k],
+ v->WritebackVTaps[k],
+ v->WritebackDestinationWidth[k],
+ v->WritebackDestinationHeight[k],
+ v->WritebackSourceHeight[k],
+ v->HTotal[k]) / v->RequiredDISPCLK[i][j];
+ } else {
+ v->WritebackDelayTime[k] = 0.0;
+ }
+ for (m = 0; m <= v->NumberOfActivePlanes - 1; m++) {
+ if (v->BlendingAndTiming[m] == k && v->WritebackEnable[m] == true) {
+ v->WritebackDelayTime[k] = dml_max(
+ v->WritebackDelayTime[k],
+ v->WritebackLatency
+ + CalculateWriteBackDelay(
+ v->WritebackPixelFormat[m],
+ v->WritebackHRatio[m],
+ v->WritebackVRatio[m],
+ v->WritebackVTaps[m],
+ v->WritebackDestinationWidth[m],
+ v->WritebackDestinationHeight[m],
+ v->WritebackSourceHeight[m],
+ v->HTotal[m]) / v->RequiredDISPCLK[i][j]);
+ }
+ }
+ }
+ }
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ for (m = 0; m <= v->NumberOfActivePlanes - 1; m++) {
+ if (v->BlendingAndTiming[k] == m) {
+ v->WritebackDelayTime[k] = v->WritebackDelayTime[m];
+ }
+ }
+ }
+ v->MaxMaxVStartup[i][j] = 0;
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ v->MaximumVStartup[i][j][k] = v->VTotal[k] - v->VActive[k]
+ - dml_max(1.0, dml_ceil(1.0 * v->WritebackDelayTime[k] / (v->HTotal[k] / v->PixelClock[k]), 1.0));
+ v->MaxMaxVStartup[i][j] = dml_max(v->MaxMaxVStartup[i][j], v->MaximumVStartup[i][j][k]);
+ }
+ }
+ }
+
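+ /*Re-ordering bytes and effective DRAM clock change latency*/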
+ ReorderingBytes = v->NumberOfChannels
+ * dml_max3(
+ v->UrgentOutOfOrderReturnPerChannelPixelDataOnly,
+ v->UrgentOutOfOrderReturnPerChannelPixelMixedWithVMData,
+ v->UrgentOutOfOrderReturnPerChannelVMDataOnly);
+ v->FinalDRAMClockChangeLatency = (v->DRAMClockChangeLatencyOverride > 0 ? v->DRAMClockChangeLatencyOverride : v->DRAMClockChangeLatency);
+
+ for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (j = 0; j <= 1; ++j) {
+ v->DCFCLKState[i][j] = v->DCFCLKPerState[i];
+ }
+ }
+
+ if (v->UseMinimumRequiredDCFCLK == true) {
+ UseMinimumDCFCLK(
+ mode_lib,
+ v->MaxInterDCNTileRepeaters,
+ MaxPrefetchMode,
+ v->FinalDRAMClockChangeLatency,
+ v->SREnterPlusExitTime,
+ v->ReturnBusWidth,
+ v->RoundTripPingLatencyCycles,
+ ReorderingBytes,
+ v->PixelChunkSizeInKByte,
+ v->MetaChunkSize,
+ v->GPUVMEnable,
+ v->GPUVMMaxPageTableLevels,
+ v->HostVMEnable,
+ v->NumberOfActivePlanes,
+ v->HostVMMinPageSize,
+ v->HostVMMaxNonCachedPageTableLevels,
+ v->DynamicMetadataVMEnabled,
+ v->ImmediateFlipRequirement,
+ v->ProgressiveToInterlaceUnitInOPP,
+ v->MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation,
+ v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly,
+ v->VTotal,
+ v->VActive,
+ v->DynamicMetadataTransmittedBytes,
+ v->DynamicMetadataLinesBeforeActiveRequired,
+ v->Interlace,
+ v->RequiredDPPCLK,
+ v->RequiredDISPCLK,
+ v->UrgLatency,
+ v->NoOfDPP,
+ v->ProjectedDCFCLKDeepSleep,
+ v->MaximumVStartup,
+ v->TotalVActivePixelBandwidth,
+ v->TotalVActiveCursorBandwidth,
+ v->TotalMetaRowBandwidth,
+ v->TotalDPTERowBandwidth,
+ v->TotalNumberOfActiveDPP,
+ v->TotalNumberOfDCCActiveDPP,
+ v->dpte_group_bytes,
+ v->PrefetchLinesY,
+ v->PrefetchLinesC,
+ v->swath_width_luma_ub_all_states,
+ v->swath_width_chroma_ub_all_states,
+ v->BytePerPixelY,
+ v->BytePerPixelC,
+ v->HTotal,
+ v->PixelClock,
+ v->PDEAndMetaPTEBytesPerFrame,
+ v->DPTEBytesPerRow,
+ v->MetaRowBytes,
+ v->DynamicMetadataEnable,
+ v->VActivePixelBandwidth,
+ v->VActiveCursorBandwidth,
+ v->ReadBandwidthLuma,
+ v->ReadBandwidthChroma,
+ v->DCFCLKPerState,
+ v->DCFCLKState);
+
+ if (v->ClampMinDCFCLK) {
+ /* Clamp calculated values to actual minimum */
+ for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (j = 0; j <= 1; ++j) {
+ if (v->DCFCLKState[i][j] < mode_lib->soc.min_dcfclk) {
+ v->DCFCLKState[i][j] = mode_lib->soc.min_dcfclk;
+ }
+ }
+ }
+ }
+ }
+
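+ /*Ideal SDP port bandwidth and return bandwidth per state*/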
+ for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (j = 0; j <= 1; ++j) {
+ v->IdealSDPPortBandwidthPerState[i][j] = dml_min3(
+ v->ReturnBusWidth * v->DCFCLKState[i][j],
+ v->DRAMSpeedPerState[i] * v->NumberOfChannels * v->DRAMChannelWidth,
+ v->FabricClockPerState[i] * v->FabricDatapathToDCNDataReturn);
+ if (v->HostVMEnable != true) {
+ v->ReturnBWPerState[i][j] = v->IdealSDPPortBandwidthPerState[i][j] * v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly
+ / 100;
+ } else {
+ v->ReturnBWPerState[i][j] = v->IdealSDPPortBandwidthPerState[i][j]
+ * v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / 100;
+ }
+ }
+ }
+
+ //Re-ordering Buffer Support Check
+
+ for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (j = 0; j <= 1; ++j) {
+ if ((v->ROBBufferSizeInKByte - v->PixelChunkSizeInKByte) * 1024 / v->ReturnBWPerState[i][j]
+ > (v->RoundTripPingLatencyCycles + 32) / v->DCFCLKState[i][j] + ReorderingBytes / v->ReturnBWPerState[i][j]) {
+ v->ROBSupport[i][j] = true;
+ } else {
+ v->ROBSupport[i][j] = false;
+ }
+ }
+ }
+
+ //Vertical Active BW support check
+
+ MaxTotalVActiveRDBandwidth = 0;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ MaxTotalVActiveRDBandwidth = MaxTotalVActiveRDBandwidth + v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k];
+ }
+
+ for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (j = 0; j <= 1; ++j) {
+ v->MaxTotalVerticalActiveAvailableBandwidth[i][j] = dml_min(
+ v->IdealSDPPortBandwidthPerState[i][j] * v->MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation / 100,
+ v->DRAMSpeedPerState[i] * v->NumberOfChannels * v->DRAMChannelWidth * v->MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation
+ / 100);
+ if (MaxTotalVActiveRDBandwidth <= v->MaxTotalVerticalActiveAvailableBandwidth[i][j]) {
+ v->TotalVerticalActiveBandwidthSupport[i][j] = true;
+ } else {
+ v->TotalVerticalActiveBandwidthSupport[i][j] = false;
+ }
+ }
+ }
+
+ //Prefetch Check
+
+ for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (j = 0; j <= 1; ++j) {
+ int NextPrefetchModeState = MinPrefetchMode;
+
+ v->TimeCalc = 24 / v->ProjectedDCFCLKDeepSleep[i][j];
+
+ v->BandwidthWithoutPrefetchSupported[i][j] = true;
+ if (v->TotalVActivePixelBandwidth[i][j] + v->TotalVActiveCursorBandwidth[i][j] + v->TotalMetaRowBandwidth[i][j] + v->TotalDPTERowBandwidth[i][j]
+ > v->ReturnBWPerState[i][j] || v->NotUrgentLatencyHiding[i][j]) {
+ v->BandwidthWithoutPrefetchSupported[i][j] = false;
+ }
+
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ v->NoOfDPPThisState[k] = v->NoOfDPP[i][j][k];
+ v->swath_width_luma_ub_this_state[k] = v->swath_width_luma_ub_all_states[i][j][k];
+ v->swath_width_chroma_ub_this_state[k] = v->swath_width_chroma_ub_all_states[i][j][k];
+ v->SwathWidthYThisState[k] = v->SwathWidthYAllStates[i][j][k];
+ v->SwathWidthCThisState[k] = v->SwathWidthCAllStates[i][j][k];
+ v->SwathHeightYThisState[k] = v->SwathHeightYAllStates[i][j][k];
+ v->SwathHeightCThisState[k] = v->SwathHeightCAllStates[i][j][k];
+ v->DETBufferSizeYThisState[k] = v->DETBufferSizeYAllStates[i][j][k];
+ v->DETBufferSizeCThisState[k] = v->DETBufferSizeCAllStates[i][j][k];
+ v->ODMCombineEnabled[k] = v->ODMCombineEnablePerState[i][k];
+ }
+
+ v->ExtraLatency = CalculateExtraLatency(
+ v->RoundTripPingLatencyCycles,
+ ReorderingBytes,
+ v->DCFCLKState[i][j],
+ v->TotalNumberOfActiveDPP[i][j],
+ v->PixelChunkSizeInKByte,
+ v->TotalNumberOfDCCActiveDPP[i][j],
+ v->MetaChunkSize,
+ v->ReturnBWPerState[i][j],
+ v->GPUVMEnable,
+ v->HostVMEnable,
+ v->NumberOfActivePlanes,
+ v->NoOfDPPThisState,
+ v->dpte_group_bytes,
+ v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ v->HostVMMinPageSize,
+ v->HostVMMaxNonCachedPageTableLevels);
+
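+ /*Iterate prefetch mode and max vstartup until the prefetch schedule is supported or options are exhausted*/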
+ v->NextMaxVStartup = v->MaxMaxVStartup[i][j];
+ do {
+ v->PrefetchModePerState[i][j] = NextPrefetchModeState;
+ v->MaxVStartup = v->NextMaxVStartup;
+
+ v->TWait = CalculateTWait(v->PrefetchModePerState[i][j], v->FinalDRAMClockChangeLatency, v->UrgLatency[i], v->SREnterPlusExitTime);
+
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ Pipe myPipe = { 0 };
+
+ myPipe.DPPCLK = v->RequiredDPPCLK[i][j][k];
+ myPipe.DISPCLK = v->RequiredDISPCLK[i][j];
+ myPipe.PixelClock = v->PixelClock[k];
+ myPipe.DCFCLKDeepSleep = v->ProjectedDCFCLKDeepSleep[i][j];
+ myPipe.DPPPerPlane = v->NoOfDPP[i][j][k];
+ myPipe.ScalerEnabled = v->ScalerEnabled[k];
+ myPipe.SourceScan = v->SourceScan[k];
+ myPipe.BlockWidth256BytesY = v->Read256BlockWidthY[k];
+ myPipe.BlockHeight256BytesY = v->Read256BlockHeightY[k];
+ myPipe.BlockWidth256BytesC = v->Read256BlockWidthC[k];
+ myPipe.BlockHeight256BytesC = v->Read256BlockHeightC[k];
+ myPipe.InterlaceEnable = v->Interlace[k];
+ myPipe.NumberOfCursors = v->NumberOfCursors[k];
+ myPipe.VBlank = v->VTotal[k] - v->VActive[k];
+ myPipe.HTotal = v->HTotal[k];
+ myPipe.DCCEnable = v->DCCEnable[k];
+ myPipe.ODMCombineEnabled = !!v->ODMCombineEnabled[k];
+
+ v->NoTimeForPrefetch[i][j][k] = CalculatePrefetchSchedule(
+ mode_lib,
+ v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ &myPipe,
+ v->DSCDelayPerState[i][k],
+ v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater,
+ v->DPPCLKDelaySCL,
+ v->DPPCLKDelaySCLLBOnly,
+ v->DPPCLKDelayCNVCCursor,
+ v->DISPCLKDelaySubtotal,
+ v->SwathWidthYThisState[k] / v->HRatio[k],
+ v->OutputFormat[k],
+ v->MaxInterDCNTileRepeaters,
+ dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]),
+ v->MaximumVStartup[i][j][k],
+ v->GPUVMMaxPageTableLevels,
+ v->GPUVMEnable,
+ v->HostVMEnable,
+ v->HostVMMaxNonCachedPageTableLevels,
+ v->HostVMMinPageSize,
+ v->DynamicMetadataEnable[k],
+ v->DynamicMetadataVMEnabled,
+ v->DynamicMetadataLinesBeforeActiveRequired[k],
+ v->DynamicMetadataTransmittedBytes[k],
+ v->UrgLatency[i],
+ v->ExtraLatency,
+ v->TimeCalc,
+ v->PDEAndMetaPTEBytesPerFrame[i][j][k],
+ v->MetaRowBytes[i][j][k],
+ v->DPTEBytesPerRow[i][j][k],
+ v->PrefetchLinesY[i][j][k],
+ v->SwathWidthYThisState[k],
+ v->BytePerPixelY[k],
+ v->PrefillY[k],
+ v->MaxNumSwY[k],
+ v->PrefetchLinesC[i][j][k],
+ v->SwathWidthCThisState[k],
+ v->BytePerPixelC[k],
+ v->PrefillC[k],
+ v->MaxNumSwC[k],
+ v->swath_width_luma_ub_this_state[k],
+ v->swath_width_chroma_ub_this_state[k],
+ v->SwathHeightYThisState[k],
+ v->SwathHeightCThisState[k],
+ v->TWait,
+ v->ProgressiveToInterlaceUnitInOPP,
+ &v->DSTXAfterScaler[k],
+ &v->DSTYAfterScaler[k],
+ &v->LineTimesForPrefetch[k],
+ &v->PrefetchBW[k],
+ &v->LinesForMetaPTE[k],
+ &v->LinesForMetaAndDPTERow[k],
+ &v->VRatioPreY[i][j][k],
+ &v->VRatioPreC[i][j][k],
+ &v->RequiredPrefetchPixelDataBWLuma[i][j][k],
+ &v->RequiredPrefetchPixelDataBWChroma[i][j][k],
+ &v->NoTimeForDynamicMetadata[i][j][k],
+ &v->Tno_bw[k],
+ &v->prefetch_vmrow_bw[k],
+ &v->Tdmdl_vm[k],
+ &v->Tdmdl[k],
+ &v->VUpdateOffsetPix[k],
+ &v->VUpdateWidthPix[k],
+ &v->VReadyOffsetPix[k]);
+ }
+
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ CalculateUrgentBurstFactor(
+ v->swath_width_luma_ub_this_state[k],
+ v->swath_width_chroma_ub_this_state[k],
+ v->DETBufferSizeInKByte,
+ v->SwathHeightYThisState[k],
+ v->SwathHeightCThisState[k],
+ v->HTotal[k] / v->PixelClock[k],
+ v->UrgLatency[i],
+ v->CursorBufferSize,
+ v->CursorWidth[k][0],
+ v->CursorBPP[k][0],
+ v->VRatioPreY[i][j][k],
+ v->VRatioPreC[i][j][k],
+ v->BytePerPixelInDETY[k],
+ v->BytePerPixelInDETC[k],
+ v->DETBufferSizeYThisState[k],
+ v->DETBufferSizeCThisState[k],
+ &v->UrgentBurstFactorCursorPre[k],
+ &v->UrgentBurstFactorLumaPre[k],
+ &v->UrgentBurstFactorChromaPre[k],
+ &v->NoUrgentLatencyHidingPre[k]);
+ }
+
+ v->MaximumReadBandwidthWithPrefetch = 0.0;
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ v->cursor_bw_pre[k] = v->NumberOfCursors[k] * v->CursorWidth[k][0] * v->CursorBPP[k][0] / 8.0 / (v->HTotal[k] / v->PixelClock[k])
+ * v->VRatioPreY[i][j][k];
+
+ v->MaximumReadBandwidthWithPrefetch = v->MaximumReadBandwidthWithPrefetch
+ + dml_max4(
+ v->VActivePixelBandwidth[i][j][k],
+ v->VActiveCursorBandwidth[i][j][k]
+ + v->NoOfDPP[i][j][k] * (v->meta_row_bandwidth[i][j][k] + v->dpte_row_bandwidth[i][j][k]),
+ v->NoOfDPP[i][j][k] * v->prefetch_vmrow_bw[k],
+ v->NoOfDPP[i][j][k]
+ * (v->RequiredPrefetchPixelDataBWLuma[i][j][k] * v->UrgentBurstFactorLumaPre[k]
+ + v->RequiredPrefetchPixelDataBWChroma[i][j][k]
+ * v->UrgentBurstFactorChromaPre[k])
+ + v->cursor_bw_pre[k] * v->UrgentBurstFactorCursorPre[k]);
+ }
+
+ v->NotEnoughUrgentLatencyHidingPre = false;
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ if (v->NoUrgentLatencyHidingPre[k] == true) {
+ v->NotEnoughUrgentLatencyHidingPre = true;
+ }
+ }
+
+ v->PrefetchSupported[i][j] = true;
+ if (v->BandwidthWithoutPrefetchSupported[i][j] == false || v->MaximumReadBandwidthWithPrefetch > v->ReturnBWPerState[i][j]
+ || v->NotEnoughUrgentLatencyHidingPre == 1) {
+ v->PrefetchSupported[i][j] = false;
+ }
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ if (v->LineTimesForPrefetch[k] < 2.0 || v->LinesForMetaPTE[k] >= 32.0 || v->LinesForMetaAndDPTERow[k] >= 16.0
+ || v->NoTimeForPrefetch[i][j][k] == true) {
+ v->PrefetchSupported[i][j] = false;
+ }
+ }
+
+ v->DynamicMetadataSupported[i][j] = true;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->NoTimeForDynamicMetadata[i][j][k] == true) {
+ v->DynamicMetadataSupported[i][j] = false;
+ }
+ }
+
+ v->VRatioInPrefetchSupported[i][j] = true;
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ if (v->VRatioPreY[i][j][k] > 4.0 || v->VRatioPreC[i][j][k] > 4.0 || v->NoTimeForPrefetch[i][j][k] == true) {
+ v->VRatioInPrefetchSupported[i][j] = false;
+ }
+ }
+ v->AnyLinesForVMOrRowTooLarge = false;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->LinesForMetaAndDPTERow[k] >= 16 || v->LinesForMetaPTE[k] >= 32) {
+ v->AnyLinesForVMOrRowTooLarge = true;
+ }
+ }
+
+ if (v->PrefetchSupported[i][j] == true && v->VRatioInPrefetchSupported[i][j] == true) {
+ v->BandwidthAvailableForImmediateFlip = v->ReturnBWPerState[i][j];
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ v->BandwidthAvailableForImmediateFlip = v->BandwidthAvailableForImmediateFlip
+ - dml_max(
+ v->VActivePixelBandwidth[i][j][k] + v->VActiveCursorBandwidth[i][j][k],
+ v->NoOfDPP[i][j][k]
+ * (v->RequiredPrefetchPixelDataBWLuma[i][j][k] * v->UrgentBurstFactorLumaPre[k]
+ + v->RequiredPrefetchPixelDataBWChroma[i][j][k]
+ * v->UrgentBurstFactorChromaPre[k])
+ + v->cursor_bw_pre[k] * v->UrgentBurstFactorCursorPre[k]);
+ }
+ v->TotImmediateFlipBytes = 0.0;
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ v->TotImmediateFlipBytes = v->TotImmediateFlipBytes + v->NoOfDPP[i][j][k] * v->PDEAndMetaPTEBytesPerFrame[i][j][k]
+ + v->MetaRowBytes[i][j][k] + v->DPTEBytesPerRow[i][j][k];
+ }
+
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ CalculateFlipSchedule(
+ mode_lib,
+ v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ v->ExtraLatency,
+ v->UrgLatency[i],
+ v->GPUVMMaxPageTableLevels,
+ v->HostVMEnable,
+ v->HostVMMaxNonCachedPageTableLevels,
+ v->GPUVMEnable,
+ v->HostVMMinPageSize,
+ v->PDEAndMetaPTEBytesPerFrame[i][j][k],
+ v->MetaRowBytes[i][j][k],
+ v->DPTEBytesPerRow[i][j][k],
+ v->BandwidthAvailableForImmediateFlip,
+ v->TotImmediateFlipBytes,
+ v->SourcePixelFormat[k],
+ v->HTotal[k] / v->PixelClock[k],
+ v->VRatio[k],
+ v->VRatioChroma[k],
+ v->Tno_bw[k],
+ v->DCCEnable[k],
+ v->dpte_row_height[k],
+ v->meta_row_height[k],
+ v->dpte_row_height_chroma[k],
+ v->meta_row_height_chroma[k],
+ &v->DestinationLinesToRequestVMInImmediateFlip[k],
+ &v->DestinationLinesToRequestRowInImmediateFlip[k],
+ &v->final_flip_bw[k],
+ &v->ImmediateFlipSupportedForPipe[k]);
+ }
+ v->total_dcn_read_bw_with_flip = 0.0;
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ v->total_dcn_read_bw_with_flip = v->total_dcn_read_bw_with_flip
+ + dml_max3(
+ v->NoOfDPP[i][j][k] * v->prefetch_vmrow_bw[k],
+ v->NoOfDPP[i][j][k] * v->final_flip_bw[k] + v->VActivePixelBandwidth[i][j][k]
+ + v->VActiveCursorBandwidth[i][j][k],
+ v->NoOfDPP[i][j][k]
+ * (v->final_flip_bw[k]
+ + v->RequiredPrefetchPixelDataBWLuma[i][j][k]
+ * v->UrgentBurstFactorLumaPre[k]
+ + v->RequiredPrefetchPixelDataBWChroma[i][j][k]
+ * v->UrgentBurstFactorChromaPre[k])
+ + v->cursor_bw_pre[k] * v->UrgentBurstFactorCursorPre[k]);
+ }
+ v->ImmediateFlipSupportedForState[i][j] = true;
+ if (v->total_dcn_read_bw_with_flip > v->ReturnBWPerState[i][j]) {
+ v->ImmediateFlipSupportedForState[i][j] = false;
+ }
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ if (v->ImmediateFlipSupportedForPipe[k] == false) {
+ v->ImmediateFlipSupportedForState[i][j] = false;
+ }
+ }
+ } else {
+ v->ImmediateFlipSupportedForState[i][j] = false;
+ }
+ if (v->MaxVStartup <= 13 || v->AnyLinesForVMOrRowTooLarge == false) {
+ v->NextMaxVStartup = v->MaxMaxVStartup[i][j];
+ NextPrefetchModeState = NextPrefetchModeState + 1;
+ } else {
+ v->NextMaxVStartup = v->NextMaxVStartup - 1;
+ }
+ } while (!((v->PrefetchSupported[i][j] == true && v->DynamicMetadataSupported[i][j] == true && v->VRatioInPrefetchSupported[i][j] == true
+ && ((v->HostVMEnable == false && v->ImmediateFlipRequirement != dm_immediate_flip_required)
+ || v->ImmediateFlipSupportedForState[i][j] == true))
+ || (v->NextMaxVStartup == v->MaxMaxVStartup[i][j] && NextPrefetchModeState > MaxPrefetchMode)));
+
+ CalculateWatermarksAndDRAMSpeedChangeSupport(
+ mode_lib,
+ v->PrefetchModePerState[i][j],
+ v->NumberOfActivePlanes,
+ v->MaxLineBufferLines,
+ v->LineBufferSize,
+ v->DPPOutputBufferPixels,
+ v->DETBufferSizeInKByte,
+ v->WritebackInterfaceBufferSize,
+ v->DCFCLKState[i][j],
+ v->ReturnBWPerState[i][j],
+ v->GPUVMEnable,
+ v->dpte_group_bytes,
+ v->MetaChunkSize,
+ v->UrgLatency[i],
+ v->ExtraLatency,
+ v->WritebackLatency,
+ v->WritebackChunkSize,
+ v->SOCCLKPerState[i],
+ v->FinalDRAMClockChangeLatency,
+ v->SRExitTime,
+ v->SREnterPlusExitTime,
+ v->ProjectedDCFCLKDeepSleep[i][j],
+ v->NoOfDPPThisState,
+ v->DCCEnable,
+ v->RequiredDPPCLKThisState,
+ v->DETBufferSizeYThisState,
+ v->DETBufferSizeCThisState,
+ v->SwathHeightYThisState,
+ v->SwathHeightCThisState,
+ v->LBBitPerPixel,
+ v->SwathWidthYThisState,
+ v->SwathWidthCThisState,
+ v->HRatio,
+ v->HRatioChroma,
+ v->vtaps,
+ v->VTAPsChroma,
+ v->VRatio,
+ v->VRatioChroma,
+ v->HTotal,
+ v->PixelClock,
+ v->BlendingAndTiming,
+ v->BytePerPixelInDETY,
+ v->BytePerPixelInDETC,
+ v->DSTXAfterScaler,
+ v->DSTYAfterScaler,
+ v->WritebackEnable,
+ v->WritebackPixelFormat,
+ v->WritebackDestinationWidth,
+ v->WritebackDestinationHeight,
+ v->WritebackSourceHeight,
+ &v->DRAMClockChangeSupport[i][j],
+ &v->UrgentWatermark,
+ &v->WritebackUrgentWatermark,
+ &v->DRAMClockChangeWatermark,
+ &v->WritebackDRAMClockChangeWatermark,
+ &v->StutterExitWatermark,
+ &v->StutterEnterPlusExitWatermark,
+ &v->MinActiveDRAMClockChangeLatencySupported);
+ }
+ }
+
+ /*PTE Buffer Size Check*/
+
+ for (i = 0; i < v->soc.num_states; i++) {
+ for (j = 0; j < 2; j++) {
+ v->PTEBufferSizeNotExceeded[i][j] = true;
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ if (v->PTEBufferSizeNotExceededY[i][j][k] == false || v->PTEBufferSizeNotExceededC[i][j][k] == false) {
+ v->PTEBufferSizeNotExceeded[i][j] = false;
+ }
+ }
+ }
+ }
+ /*Cursor Support Check*/
+
+ v->CursorSupport = true;
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ if (v->CursorWidth[k][0] > 0.0) {
+ if (v->CursorBPP[k][0] == 64 && v->Cursor64BppSupport == false) {
+ v->CursorSupport = false;
+ }
+ }
+ }
+ /*Valid Pitch Check*/
+
+ v->PitchSupport = true;
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ v->AlignedYPitch[k] = dml_ceil(dml_max(v->PitchY[k], v->SurfaceWidthY[k]), v->MacroTileWidthY[k]);
+ if (v->DCCEnable[k] == true) {
+ v->AlignedDCCMetaPitchY[k] = dml_ceil(dml_max(v->DCCMetaPitchY[k], v->SurfaceWidthY[k]), 64.0 * v->Read256BlockWidthY[k]);
+ } else {
+ v->AlignedDCCMetaPitchY[k] = v->DCCMetaPitchY[k];
+ }
+ if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32 && v->SourcePixelFormat[k] != dm_444_16 && v->SourcePixelFormat[k] != dm_mono_16
+ && v->SourcePixelFormat[k] != dm_rgbe && v->SourcePixelFormat[k] != dm_mono_8) {
+ v->AlignedCPitch[k] = dml_ceil(dml_max(v->PitchC[k], v->SurfaceWidthC[k]), v->MacroTileWidthC[k]);
+ if (v->DCCEnable[k] == true) {
+ v->AlignedDCCMetaPitchC[k] = dml_ceil(dml_max(v->DCCMetaPitchC[k], v->SurfaceWidthC[k]), 64.0 * v->Read256BlockWidthC[k]);
+ } else {
+ v->AlignedDCCMetaPitchC[k] = v->DCCMetaPitchC[k];
+ }
+ } else {
+ v->AlignedCPitch[k] = v->PitchC[k];
+ v->AlignedDCCMetaPitchC[k] = v->DCCMetaPitchC[k];
+ }
+ if (v->AlignedYPitch[k] > v->PitchY[k] || v->AlignedCPitch[k] > v->PitchC[k] || v->AlignedDCCMetaPitchY[k] > v->DCCMetaPitchY[k]
+ || v->AlignedDCCMetaPitchC[k] > v->DCCMetaPitchC[k]) {
+ v->PitchSupport = false;
+ }
+ }
+
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ if (v->ViewportWidth[k] > v->SurfaceWidthY[k] || v->ViewportHeight[k] > v->SurfaceHeightY[k])
+ ViewportExceedsSurface = true;
+
+ if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32 && v->SourcePixelFormat[k] != dm_444_16
+ && v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) {
+ if (v->ViewportWidthChroma[k] > v->SurfaceWidthC[k] || v->ViewportHeightChroma[k] > v->SurfaceHeightC[k]) {
+ ViewportExceedsSurface = true;
+ }
+ }
+ }
+ /*Mode Support, Voltage State and SOC Configuration*/
+
+ for (i = v->soc.num_states - 1; i >= 0; i--) {
+ for (j = 0; j < 2; j++) {
+ if (v->ScaleRatioAndTapsSupport == 1 && v->SourceFormatPixelAndScanSupport == 1 && v->ViewportSizeSupport[i][j] == 1
+ && v->DIOSupport[i] == 1 && v->ODMCombine4To1SupportCheckOK[i] == 1
+ && v->NotEnoughDSCUnits[i] == 0 && v->DSCCLKRequiredMoreThanSupported[i] == 0
+ && v->DTBCLKRequiredMoreThanSupported[i] == 0
+ && v->ROBSupport[i][j] == 1 && v->DISPCLK_DPPCLK_Support[i][j] == 1 && v->TotalAvailablePipesSupport[i][j] == 1
+ && EnoughWritebackUnits == 1 && WritebackModeSupport == 1
+ && v->WritebackLatencySupport == 1 && v->WritebackScaleRatioAndTapsSupport == 1 && v->CursorSupport == 1 && v->PitchSupport == 1
+ && ViewportExceedsSurface == 0 && v->PrefetchSupported[i][j] == 1 && v->DynamicMetadataSupported[i][j] == 1
+ && v->TotalVerticalActiveBandwidthSupport[i][j] == 1 && v->VRatioInPrefetchSupported[i][j] == 1
+ && v->PTEBufferSizeNotExceeded[i][j] == 1 && v->NonsupportedDSCInputBPC == 0
+ && ((v->HostVMEnable == 0 && v->ImmediateFlipRequirement != dm_immediate_flip_required)
+ || v->ImmediateFlipSupportedForState[i][j] == true)) {
+ v->ModeSupport[i][j] = true;
+ } else {
+ v->ModeSupport[i][j] = false;
+ }
+ }
+ }
+ {
+ unsigned int MaximumMPCCombine = 0;
+ for (i = v->soc.num_states; i >= 0; i--) {
+ if (i == v->soc.num_states || v->ModeSupport[i][0] == true || v->ModeSupport[i][1] == true) {
+ v->VoltageLevel = i;
+ v->ModeIsSupported = v->ModeSupport[i][0] == true || v->ModeSupport[i][1] == true;
+ if (v->ModeSupport[i][1] == true) {
+ MaximumMPCCombine = 1;
+ } else {
+ MaximumMPCCombine = 0;
+ }
+ }
+ }
+ v->ImmediateFlipSupport = v->ImmediateFlipSupportedForState[v->VoltageLevel][MaximumMPCCombine];
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
+ v->MPCCombineEnable[k] = v->MPCCombine[v->VoltageLevel][MaximumMPCCombine][k];
+ v->DPPPerPlane[k] = v->NoOfDPP[v->VoltageLevel][MaximumMPCCombine][k];
+ }
+ v->DCFCLK = v->DCFCLKState[v->VoltageLevel][MaximumMPCCombine];
+ v->DRAMSpeed = v->DRAMSpeedPerState[v->VoltageLevel];
+ v->FabricClock = v->FabricClockPerState[v->VoltageLevel];
+ v->SOCCLK = v->SOCCLKPerState[v->VoltageLevel];
+ v->ReturnBW = v->ReturnBWPerState[v->VoltageLevel][MaximumMPCCombine];
+ v->maxMpcComb = MaximumMPCCombine;
+ }
+}
+
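+/*
+ * Compute the urgent, writeback and DRAM clock change watermarks, the stutter
+ * watermarks, and determine the level of DRAM clock change support for the
+ * given configuration.
+ */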
+static void CalculateWatermarksAndDRAMSpeedChangeSupport(
+ struct display_mode_lib *mode_lib,
+ unsigned int PrefetchMode,
+ unsigned int NumberOfActivePlanes,
+ unsigned int MaxLineBufferLines,
+ unsigned int LineBufferSize,
+ unsigned int DPPOutputBufferPixels,
+ double DETBufferSizeInKByte,
+ unsigned int WritebackInterfaceBufferSize,
+ double DCFCLK,
+ double ReturnBW,
+ bool GPUVMEnable,
+ unsigned int dpte_group_bytes[],
+ unsigned int MetaChunkSize,
+ double UrgentLatency,
+ double ExtraLatency,
+ double WritebackLatency,
+ double WritebackChunkSize,
+ double SOCCLK,
+ double DRAMClockChangeLatency,
+ double SRExitTime,
+ double SREnterPlusExitTime,
+ double DCFCLKDeepSleep,
+ unsigned int DPPPerPlane[],
+ bool DCCEnable[],
+ double DPPCLK[],
+ double DETBufferSizeY[],
+ double DETBufferSizeC[],
+ unsigned int SwathHeightY[],
+ unsigned int SwathHeightC[],
+ unsigned int LBBitPerPixel[],
+ double SwathWidthY[],
+ double SwathWidthC[],
+ double HRatio[],
+ double HRatioChroma[],
+ unsigned int vtaps[],
+ unsigned int VTAPsChroma[],
+ double VRatio[],
+ double VRatioChroma[],
+ unsigned int HTotal[],
+ double PixelClock[],
+ unsigned int BlendingAndTiming[],
+ double BytePerPixelDETY[],
+ double BytePerPixelDETC[],
+ double DSTXAfterScaler[],
+ double DSTYAfterScaler[],
+ bool WritebackEnable[],
+ enum source_format_class WritebackPixelFormat[],
+ double WritebackDestinationWidth[],
+ double WritebackDestinationHeight[],
+ double WritebackSourceHeight[],
+ enum clock_change_support *DRAMClockChangeSupport,
+ double *UrgentWatermark,
+ double *WritebackUrgentWatermark,
+ double *DRAMClockChangeWatermark,
+ double *WritebackDRAMClockChangeWatermark,
+ double *StutterExitWatermark,
+ double *StutterEnterPlusExitWatermark,
+ double *MinActiveDRAMClockChangeLatencySupported)
+{
+ double EffectiveLBLatencyHidingY = 0;
+ double EffectiveLBLatencyHidingC = 0;
+ double LinesInDETY[DC__NUM_DPP__MAX] = { 0 };
+ double LinesInDETC = 0;
+ unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX] = { 0 };
+ unsigned int LinesInDETCRoundedDownToSwath = 0;
+ double FullDETBufferingTimeY[DC__NUM_DPP__MAX] = { 0 };
+ double FullDETBufferingTimeC = 0;
+ double ActiveDRAMClockChangeLatencyMarginY = 0;
+ double ActiveDRAMClockChangeLatencyMarginC = 0;
+ double WritebackDRAMClockChangeLatencyMargin = 0;
+ double PlaneWithMinActiveDRAMClockChangeMargin = 0;
+ double SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 0;
+ double FullDETBufferingTimeYStutterCriticalPlane = 0;
+ double TimeToFinishSwathTransferStutterCriticalPlane = 0;
+ double WritebackDRAMClockChangeLatencyHiding = 0;
+ unsigned int k, j;
+
+ mode_lib->vba.TotalActiveDPP = 0;
+ mode_lib->vba.TotalDCCActiveDPP = 0;
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ mode_lib->vba.TotalActiveDPP = mode_lib->vba.TotalActiveDPP + DPPPerPlane[k];
+ if (DCCEnable[k] == true) {
+ mode_lib->vba.TotalDCCActiveDPP = mode_lib->vba.TotalDCCActiveDPP + DPPPerPlane[k];
+ }
+ }
+
+ *UrgentWatermark = UrgentLatency + ExtraLatency;
+
+ *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
+
+ mode_lib->vba.TotalActiveWriteback = 0;
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (WritebackEnable[k] == true) {
+ mode_lib->vba.TotalActiveWriteback = mode_lib->vba.TotalActiveWriteback + 1;
+ }
+ }
+
+ if (mode_lib->vba.TotalActiveWriteback <= 1) {
+ *WritebackUrgentWatermark = WritebackLatency;
+ } else {
+ *WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ }
+
+ if (mode_lib->vba.TotalActiveWriteback <= 1) {
+ *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
+ } else {
+ *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ }
+
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+
+ mode_lib->vba.LBLatencyHidingSourceLinesY = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
+
+ mode_lib->vba.LBLatencyHidingSourceLinesC = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
+
+ EffectiveLBLatencyHidingY = mode_lib->vba.LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
+
+ EffectiveLBLatencyHidingC = mode_lib->vba.LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+
+ LinesInDETY[k] = (double) DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k];
+ LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
+ FullDETBufferingTimeY[k] = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ if (BytePerPixelDETC[k] > 0) {
+ LinesInDETC = DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
+ LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
+ } else {
+ LinesInDETC = 0;
+ FullDETBufferingTimeC = 999999;
+ }
+
+ ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY[k] - *UrgentWatermark - (HTotal[k] / PixelClock[k]) * (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) - *DRAMClockChangeWatermark;
+
+ if (NumberOfActivePlanes > 1) {
+ ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
+ }
+
+ if (BytePerPixelDETC[k] > 0) {
+ ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC - *UrgentWatermark - (HTotal[k] / PixelClock[k]) * (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) - *DRAMClockChangeWatermark;
+
+ if (NumberOfActivePlanes > 1) {
+ ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
+ }
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
+ } else {
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
+ }
+
+ if (WritebackEnable[k] == true) {
+
+ WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024 / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
+ if (WritebackPixelFormat[k] == dm_444_64) {
+ WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
+ }
+ if (mode_lib->vba.WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave) {
+ WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding * 2;
+ }
+ WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - mode_lib->vba.WritebackDRAMClockChangeWatermark;
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k], WritebackDRAMClockChangeLatencyMargin);
+ }
+ }
+
+ mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999;
+ PlaneWithMinActiveDRAMClockChangeMargin = 0;
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] < mode_lib->vba.MinActiveDRAMClockChangeMargin) {
+ mode_lib->vba.MinActiveDRAMClockChangeMargin = mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
+ if (BlendingAndTiming[k] == k) {
+ PlaneWithMinActiveDRAMClockChangeMargin = k;
+ } else {
+ for (j = 0; j < NumberOfActivePlanes; ++j) {
+ if (BlendingAndTiming[k] == j) {
+ PlaneWithMinActiveDRAMClockChangeMargin = j;
+ }
+ }
+ }
+ }
+ }
+
+ *MinActiveDRAMClockChangeLatencySupported = mode_lib->vba.MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
+
+ SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin) && mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
+ SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
+ }
+ }
+
+ mode_lib->vba.TotalNumberOfActiveOTG = 0;
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (BlendingAndTiming[k] == k) {
+ mode_lib->vba.TotalNumberOfActiveOTG = mode_lib->vba.TotalNumberOfActiveOTG + 1;
+ }
+ }
+
+ if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+ *DRAMClockChangeSupport = dm_dram_clock_change_vactive;
+ } else if (((mode_lib->vba.SynchronizedVBlank == true || mode_lib->vba.TotalNumberOfActiveOTG == 1 || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0)) {
+ *DRAMClockChangeSupport = dm_dram_clock_change_vblank;
+ } else {
+ *DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
+ }
+
+ FullDETBufferingTimeYStutterCriticalPlane = FullDETBufferingTimeY[0];
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (FullDETBufferingTimeY[k] <= FullDETBufferingTimeYStutterCriticalPlane) {
+ FullDETBufferingTimeYStutterCriticalPlane = FullDETBufferingTimeY[k];
+ TimeToFinishSwathTransferStutterCriticalPlane = (SwathHeightY[k] - (LinesInDETY[k] - LinesInDETYRoundedDownToSwath[k])) * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ }
+ }
+
+ *StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *StutterEnterPlusExitWatermark = dml_max(SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep, TimeToFinishSwathTransferStutterCriticalPlane);
+
+}
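+
+/*
+ * Worked example with hypothetical latencies (not taken from any SoC table):
+ * for UrgentLatency = 4 us, ExtraLatency = 1.5 us and
+ * DRAMClockChangeLatency = 17 us, the code above yields
+ *   UrgentWatermark          = 4 + 1.5  = 5.5 us
+ *   DRAMClockChangeWatermark = 17 + 5.5 = 22.5 us
+ * and, as long as the smallest per-plane ActiveDRAMClockChangeLatencyMargin
+ * stays positive, *DRAMClockChangeSupport is reported as
+ * dm_dram_clock_change_vactive.
+ */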
+
+static void CalculateDCFCLKDeepSleep(
+ struct display_mode_lib *mode_lib,
+ unsigned int NumberOfActivePlanes,
+ int BytePerPixelY[],
+ int BytePerPixelC[],
+ double VRatio[],
+ double VRatioChroma[],
+ double SwathWidthY[],
+ double SwathWidthC[],
+ unsigned int DPPPerPlane[],
+ double HRatio[],
+ double HRatioChroma[],
+ double PixelClock[],
+ double PSCL_THROUGHPUT[],
+ double PSCL_THROUGHPUT_CHROMA[],
+ double DPPCLK[],
+ double ReadBandwidthLuma[],
+ double ReadBandwidthChroma[],
+ int ReturnBusWidth,
+ double *DCFCLKDeepSleep)
+{
+ double DisplayPipeLineDeliveryTimeLuma = 0;
+ double DisplayPipeLineDeliveryTimeChroma = 0;
+ unsigned int k;
+ double ReadBandwidth = 0.0;
+
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+
+ if (VRatio[k] <= 1) {
+ DisplayPipeLineDeliveryTimeLuma = SwathWidthY[k] * DPPPerPlane[k] / HRatio[k] / PixelClock[k];
+ } else {
+ DisplayPipeLineDeliveryTimeLuma = SwathWidthY[k] / PSCL_THROUGHPUT[k] / DPPCLK[k];
+ }
+ if (BytePerPixelC[k] == 0) {
+ DisplayPipeLineDeliveryTimeChroma = 0;
+ } else {
+ if (VRatioChroma[k] <= 1) {
+ DisplayPipeLineDeliveryTimeChroma = SwathWidthC[k] * DPPPerPlane[k] / HRatioChroma[k] / PixelClock[k];
+ } else {
+ DisplayPipeLineDeliveryTimeChroma = SwathWidthC[k] / PSCL_THROUGHPUT_CHROMA[k] / DPPCLK[k];
+ }
+ }
+
+ if (BytePerPixelC[k] > 0) {
+ mode_lib->vba.DCFCLKDeepSleepPerPlane[k] = dml_max(1.1 * SwathWidthY[k] * BytePerPixelY[k] / 32.0 / DisplayPipeLineDeliveryTimeLuma, 1.1 * SwathWidthC[k] * BytePerPixelC[k] / 32.0 / DisplayPipeLineDeliveryTimeChroma);
+ } else {
+ mode_lib->vba.DCFCLKDeepSleepPerPlane[k] = 1.1 * SwathWidthY[k] * BytePerPixelY[k] / 64.0 / DisplayPipeLineDeliveryTimeLuma;
+ }
+ mode_lib->vba.DCFCLKDeepSleepPerPlane[k] = dml_max(mode_lib->vba.DCFCLKDeepSleepPerPlane[k], PixelClock[k] / 16);
+
+ }
+
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ ReadBandwidth = ReadBandwidth + ReadBandwidthLuma[k] + ReadBandwidthChroma[k];
+ }
+
+ *DCFCLKDeepSleep = dml_max(8.0, ReadBandwidth / ReturnBusWidth);
+
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ *DCFCLKDeepSleep = dml_max(*DCFCLKDeepSleep, mode_lib->vba.DCFCLKDeepSleepPerPlane[k]);
+ }
+}
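+
+/*
+ * Illustrative numbers only (units follow the usual DML convention of MB/s
+ * and MHz): with a summed ReadBandwidth of 3200 and ReturnBusWidth of 64,
+ * ReadBandwidth / ReturnBusWidth is 50, so *DCFCLKDeepSleep starts at
+ * max(8, 50) = 50 MHz and is then raised to the largest
+ * DCFCLKDeepSleepPerPlane[k], each of which is itself floored at
+ * PixelClock[k] / 16 in the loop above.
+ */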
+
+static void CalculateUrgentBurstFactor(
+ long swath_width_luma_ub,
+ long swath_width_chroma_ub,
+ unsigned int DETBufferSizeInKByte,
+ unsigned int SwathHeightY,
+ unsigned int SwathHeightC,
+ double LineTime,
+ double UrgentLatency,
+ double CursorBufferSize,
+ unsigned int CursorWidth,
+ unsigned int CursorBPP,
+ double VRatio,
+ double VRatioC,
+ double BytePerPixelInDETY,
+ double BytePerPixelInDETC,
+ double DETBufferSizeY,
+ double DETBufferSizeC,
+ double *UrgentBurstFactorCursor,
+ double *UrgentBurstFactorLuma,
+ double *UrgentBurstFactorChroma,
+ bool *NotEnoughUrgentLatencyHiding)
+{
+ double LinesInDETLuma = 0;
+ double LinesInDETChroma = 0;
+ unsigned int LinesInCursorBuffer = 0;
+ double CursorBufferSizeInTime = 0;
+ double DETBufferSizeInTimeLuma = 0;
+ double DETBufferSizeInTimeChroma = 0;
+
+ *NotEnoughUrgentLatencyHiding = false;
+
+ if (CursorWidth > 0) {
+ LinesInCursorBuffer = 1 << (unsigned int) dml_floor(dml_log2(CursorBufferSize * 1024.0 / (CursorWidth * CursorBPP / 8.0)), 1.0);
+ if (VRatio > 0) {
+ CursorBufferSizeInTime = LinesInCursorBuffer * LineTime / VRatio;
+ if (CursorBufferSizeInTime - UrgentLatency <= 0) {
+ *NotEnoughUrgentLatencyHiding = true;
+ *UrgentBurstFactorCursor = 0;
+ } else {
+ *UrgentBurstFactorCursor = CursorBufferSizeInTime / (CursorBufferSizeInTime - UrgentLatency);
+ }
+ } else {
+ *UrgentBurstFactorCursor = 1;
+ }
+ }
+
+ LinesInDETLuma = DETBufferSizeY / BytePerPixelInDETY / swath_width_luma_ub;
+ if (VRatio > 0) {
+ DETBufferSizeInTimeLuma = dml_floor(LinesInDETLuma, SwathHeightY) * LineTime / VRatio;
+ if (DETBufferSizeInTimeLuma - UrgentLatency <= 0) {
+ *NotEnoughUrgentLatencyHiding = true;
+ *UrgentBurstFactorLuma = 0;
+ } else {
+ *UrgentBurstFactorLuma = DETBufferSizeInTimeLuma / (DETBufferSizeInTimeLuma - UrgentLatency);
+ }
+ } else {
+ *UrgentBurstFactorLuma = 1;
+ }
+
+ if (BytePerPixelInDETC > 0) {
+ LinesInDETChroma = DETBufferSizeC / BytePerPixelInDETC / swath_width_chroma_ub;
+ if (VRatio > 0) {
+ DETBufferSizeInTimeChroma = dml_floor(LinesInDETChroma, SwathHeightC) * LineTime / VRatio;
+ if (DETBufferSizeInTimeChroma - UrgentLatency <= 0) {
+ *NotEnoughUrgentLatencyHiding = true;
+ *UrgentBurstFactorChroma = 0;
+ } else {
+ *UrgentBurstFactorChroma = DETBufferSizeInTimeChroma / (DETBufferSizeInTimeChroma - UrgentLatency);
+ }
+ } else {
+ *UrgentBurstFactorChroma = 1;
+ }
+ }
+}
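+
+/*
+ * Hypothetical example: if the luma DET buffers DETBufferSizeInTimeLuma =
+ * 12 us of data and UrgentLatency = 4 us, the burst factor above is
+ * 12 / (12 - 4) = 1.5, i.e. fetches must run 1.5x faster than the average
+ * rate to hide the urgent latency. If the buffered time does not exceed the
+ * latency, *NotEnoughUrgentLatencyHiding is set and the factor is zeroed.
+ */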
+
+static void CalculatePixelDeliveryTimes(
+ unsigned int NumberOfActivePlanes,
+ double VRatio[],
+ double VRatioChroma[],
+ double VRatioPrefetchY[],
+ double VRatioPrefetchC[],
+ unsigned int swath_width_luma_ub[],
+ unsigned int swath_width_chroma_ub[],
+ unsigned int DPPPerPlane[],
+ double HRatio[],
+ double HRatioChroma[],
+ double PixelClock[],
+ double PSCL_THROUGHPUT[],
+ double PSCL_THROUGHPUT_CHROMA[],
+ double DPPCLK[],
+ int BytePerPixelC[],
+ enum scan_direction_class SourceScan[],
+ unsigned int NumberOfCursors[],
+ unsigned int CursorWidth[][2],
+ unsigned int CursorBPP[][2],
+ unsigned int BlockWidth256BytesY[],
+ unsigned int BlockHeight256BytesY[],
+ unsigned int BlockWidth256BytesC[],
+ unsigned int BlockHeight256BytesC[],
+ double DisplayPipeLineDeliveryTimeLuma[],
+ double DisplayPipeLineDeliveryTimeChroma[],
+ double DisplayPipeLineDeliveryTimeLumaPrefetch[],
+ double DisplayPipeLineDeliveryTimeChromaPrefetch[],
+ double DisplayPipeRequestDeliveryTimeLuma[],
+ double DisplayPipeRequestDeliveryTimeChroma[],
+ double DisplayPipeRequestDeliveryTimeLumaPrefetch[],
+ double DisplayPipeRequestDeliveryTimeChromaPrefetch[],
+ double CursorRequestDeliveryTime[],
+ double CursorRequestDeliveryTimePrefetch[])
+{
+ double req_per_swath_ub = 0;
+ unsigned int k;
+
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (VRatio[k] <= 1) {
+ DisplayPipeLineDeliveryTimeLuma[k] = swath_width_luma_ub[k] * DPPPerPlane[k] / HRatio[k] / PixelClock[k];
+ } else {
+ DisplayPipeLineDeliveryTimeLuma[k] = swath_width_luma_ub[k] / PSCL_THROUGHPUT[k] / DPPCLK[k];
+ }
+
+ if (BytePerPixelC[k] == 0) {
+ DisplayPipeLineDeliveryTimeChroma[k] = 0;
+ } else {
+ if (VRatioChroma[k] <= 1) {
+ DisplayPipeLineDeliveryTimeChroma[k] = swath_width_chroma_ub[k] * DPPPerPlane[k] / HRatioChroma[k] / PixelClock[k];
+ } else {
+ DisplayPipeLineDeliveryTimeChroma[k] = swath_width_chroma_ub[k] / PSCL_THROUGHPUT_CHROMA[k] / DPPCLK[k];
+ }
+ }
+
+ if (VRatioPrefetchY[k] <= 1) {
+ DisplayPipeLineDeliveryTimeLumaPrefetch[k] = swath_width_luma_ub[k] * DPPPerPlane[k] / HRatio[k] / PixelClock[k];
+ } else {
+ DisplayPipeLineDeliveryTimeLumaPrefetch[k] = swath_width_luma_ub[k] / PSCL_THROUGHPUT[k] / DPPCLK[k];
+ }
+
+ if (BytePerPixelC[k] == 0) {
+ DisplayPipeLineDeliveryTimeChromaPrefetch[k] = 0;
+ } else {
+ if (VRatioPrefetchC[k] <= 1) {
+ DisplayPipeLineDeliveryTimeChromaPrefetch[k] = swath_width_chroma_ub[k] * DPPPerPlane[k] / HRatioChroma[k] / PixelClock[k];
+ } else {
+ DisplayPipeLineDeliveryTimeChromaPrefetch[k] = swath_width_chroma_ub[k] / PSCL_THROUGHPUT_CHROMA[k] / DPPCLK[k];
+ }
+ }
+ }
+
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (SourceScan[k] != dm_vert) {
+ req_per_swath_ub = swath_width_luma_ub[k] / BlockWidth256BytesY[k];
+ } else {
+ req_per_swath_ub = swath_width_luma_ub[k] / BlockHeight256BytesY[k];
+ }
+ DisplayPipeRequestDeliveryTimeLuma[k] = DisplayPipeLineDeliveryTimeLuma[k] / req_per_swath_ub;
+ DisplayPipeRequestDeliveryTimeLumaPrefetch[k] = DisplayPipeLineDeliveryTimeLumaPrefetch[k] / req_per_swath_ub;
+ if (BytePerPixelC[k] == 0) {
+ DisplayPipeRequestDeliveryTimeChroma[k] = 0;
+ DisplayPipeRequestDeliveryTimeChromaPrefetch[k] = 0;
+ } else {
+ if (SourceScan[k] != dm_vert) {
+ req_per_swath_ub = swath_width_chroma_ub[k] / BlockWidth256BytesC[k];
+ } else {
+ req_per_swath_ub = swath_width_chroma_ub[k] / BlockHeight256BytesC[k];
+ }
+ DisplayPipeRequestDeliveryTimeChroma[k] = DisplayPipeLineDeliveryTimeChroma[k] / req_per_swath_ub;
+ DisplayPipeRequestDeliveryTimeChromaPrefetch[k] = DisplayPipeLineDeliveryTimeChromaPrefetch[k] / req_per_swath_ub;
+ }
+ }
+
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ int cursor_req_per_width = 0;
+ cursor_req_per_width = dml_ceil(CursorWidth[k][0] * CursorBPP[k][0] / 256 / 8, 1);
+ if (NumberOfCursors[k] > 0) {
+ if (VRatio[k] <= 1) {
+ CursorRequestDeliveryTime[k] = CursorWidth[k][0] / HRatio[k] / PixelClock[k] / cursor_req_per_width;
+ } else {
+ CursorRequestDeliveryTime[k] = CursorWidth[k][0] / PSCL_THROUGHPUT[k] / DPPCLK[k] / cursor_req_per_width;
+ }
+ if (VRatioPrefetchY[k] <= 1) {
+ CursorRequestDeliveryTimePrefetch[k] = CursorWidth[k][0] / HRatio[k] / PixelClock[k] / cursor_req_per_width;
+ } else {
+ CursorRequestDeliveryTimePrefetch[k] = CursorWidth[k][0] / PSCL_THROUGHPUT[k] / DPPCLK[k] / cursor_req_per_width;
+ }
+ } else {
+ CursorRequestDeliveryTime[k] = 0;
+ CursorRequestDeliveryTimePrefetch[k] = 0;
+ }
+ }
+}
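+
+/*
+ * Hypothetical example for the VRatio <= 1 path: with swath_width_luma_ub =
+ * 1920, one DPP per plane, HRatio = 1 and PixelClock = 148.5 MHz, the luma
+ * line delivery time above is 1920 * 1 / 1 / 148.5 ~= 12.9 us; the
+ * per-request delivery time then divides that by the number of 256-byte
+ * requests per swath.
+ */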
+
+static void CalculateMetaAndPTETimes(
+ int NumberOfActivePlanes,
+ bool GPUVMEnable,
+ int MetaChunkSize,
+ int MinMetaChunkSizeBytes,
+ int HTotal[],
+ double VRatio[],
+ double VRatioChroma[],
+ double DestinationLinesToRequestRowInVBlank[],
+ double DestinationLinesToRequestRowInImmediateFlip[],
+ bool DCCEnable[],
+ double PixelClock[],
+ int BytePerPixelY[],
+ int BytePerPixelC[],
+ enum scan_direction_class SourceScan[],
+ int dpte_row_height[],
+ int dpte_row_height_chroma[],
+ int meta_row_width[],
+ int meta_row_width_chroma[],
+ int meta_row_height[],
+ int meta_row_height_chroma[],
+ int meta_req_width[],
+ int meta_req_width_chroma[],
+ int meta_req_height[],
+ int meta_req_height_chroma[],
+ int dpte_group_bytes[],
+ int PTERequestSizeY[],
+ int PTERequestSizeC[],
+ int PixelPTEReqWidthY[],
+ int PixelPTEReqHeightY[],
+ int PixelPTEReqWidthC[],
+ int PixelPTEReqHeightC[],
+ int dpte_row_width_luma_ub[],
+ int dpte_row_width_chroma_ub[],
+ double DST_Y_PER_PTE_ROW_NOM_L[],
+ double DST_Y_PER_PTE_ROW_NOM_C[],
+ double DST_Y_PER_META_ROW_NOM_L[],
+ double DST_Y_PER_META_ROW_NOM_C[],
+ double TimePerMetaChunkNominal[],
+ double TimePerChromaMetaChunkNominal[],
+ double TimePerMetaChunkVBlank[],
+ double TimePerChromaMetaChunkVBlank[],
+ double TimePerMetaChunkFlip[],
+ double TimePerChromaMetaChunkFlip[],
+ double time_per_pte_group_nom_luma[],
+ double time_per_pte_group_vblank_luma[],
+ double time_per_pte_group_flip_luma[],
+ double time_per_pte_group_nom_chroma[],
+ double time_per_pte_group_vblank_chroma[],
+ double time_per_pte_group_flip_chroma[])
+{
+ unsigned int meta_chunk_width = 0;
+ unsigned int min_meta_chunk_width = 0;
+ unsigned int meta_chunk_per_row_int = 0;
+ unsigned int meta_row_remainder = 0;
+ unsigned int meta_chunk_threshold = 0;
+ unsigned int meta_chunks_per_row_ub = 0;
+ unsigned int meta_chunk_width_chroma = 0;
+ unsigned int min_meta_chunk_width_chroma = 0;
+ unsigned int meta_chunk_per_row_int_chroma = 0;
+ unsigned int meta_row_remainder_chroma = 0;
+ unsigned int meta_chunk_threshold_chroma = 0;
+ unsigned int meta_chunks_per_row_ub_chroma = 0;
+ unsigned int dpte_group_width_luma = 0;
+ unsigned int dpte_groups_per_row_luma_ub = 0;
+ unsigned int dpte_group_width_chroma = 0;
+ unsigned int dpte_groups_per_row_chroma_ub = 0;
+ unsigned int k;
+
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ DST_Y_PER_PTE_ROW_NOM_L[k] = dpte_row_height[k] / VRatio[k];
+ if (BytePerPixelC[k] == 0) {
+ DST_Y_PER_PTE_ROW_NOM_C[k] = 0;
+ } else {
+ DST_Y_PER_PTE_ROW_NOM_C[k] = dpte_row_height_chroma[k] / VRatioChroma[k];
+ }
+ DST_Y_PER_META_ROW_NOM_L[k] = meta_row_height[k] / VRatio[k];
+ if (BytePerPixelC[k] == 0) {
+ DST_Y_PER_META_ROW_NOM_C[k] = 0;
+ } else {
+ DST_Y_PER_META_ROW_NOM_C[k] = meta_row_height_chroma[k] / VRatioChroma[k];
+ }
+ }
+
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (DCCEnable[k] == true) {
+ meta_chunk_width = MetaChunkSize * 1024 * 256 / BytePerPixelY[k] / meta_row_height[k];
+ min_meta_chunk_width = MinMetaChunkSizeBytes * 256 / BytePerPixelY[k] / meta_row_height[k];
+ meta_chunk_per_row_int = meta_row_width[k] / meta_chunk_width;
+ meta_row_remainder = meta_row_width[k] % meta_chunk_width;
+ if (SourceScan[k] != dm_vert) {
+ meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width[k];
+ } else {
+ meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_height[k];
+ }
+ if (meta_row_remainder <= meta_chunk_threshold) {
+ meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
+ } else {
+ meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
+ }
+ TimePerMetaChunkNominal[k] = meta_row_height[k] / VRatio[k] * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub;
+ TimePerMetaChunkVBlank[k] = DestinationLinesToRequestRowInVBlank[k] * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub;
+ TimePerMetaChunkFlip[k] = DestinationLinesToRequestRowInImmediateFlip[k] * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub;
+ if (BytePerPixelC[k] == 0) {
+ TimePerChromaMetaChunkNominal[k] = 0;
+ TimePerChromaMetaChunkVBlank[k] = 0;
+ TimePerChromaMetaChunkFlip[k] = 0;
+ } else {
+ meta_chunk_width_chroma = MetaChunkSize * 1024 * 256 / BytePerPixelC[k] / meta_row_height_chroma[k];
+ min_meta_chunk_width_chroma = MinMetaChunkSizeBytes * 256 / BytePerPixelC[k] / meta_row_height_chroma[k];
+ meta_chunk_per_row_int_chroma = (double) meta_row_width_chroma[k] / meta_chunk_width_chroma;
+ meta_row_remainder_chroma = meta_row_width_chroma[k] % meta_chunk_width_chroma;
+ if (SourceScan[k] != dm_vert) {
+ meta_chunk_threshold_chroma = 2 * min_meta_chunk_width_chroma - meta_req_width_chroma[k];
+ } else {
+ meta_chunk_threshold_chroma = 2 * min_meta_chunk_width_chroma - meta_req_height_chroma[k];
+ }
+ if (meta_row_remainder_chroma <= meta_chunk_threshold_chroma) {
+ meta_chunks_per_row_ub_chroma = meta_chunk_per_row_int_chroma + 1;
+ } else {
+ meta_chunks_per_row_ub_chroma = meta_chunk_per_row_int_chroma + 2;
+ }
+ TimePerChromaMetaChunkNominal[k] = meta_row_height_chroma[k] / VRatioChroma[k] * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub_chroma;
+ TimePerChromaMetaChunkVBlank[k] = DestinationLinesToRequestRowInVBlank[k] * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub_chroma;
+ TimePerChromaMetaChunkFlip[k] = DestinationLinesToRequestRowInImmediateFlip[k] * HTotal[k] / PixelClock[k] / meta_chunks_per_row_ub_chroma;
+ }
+ } else {
+ TimePerMetaChunkNominal[k] = 0;
+ TimePerMetaChunkVBlank[k] = 0;
+ TimePerMetaChunkFlip[k] = 0;
+ TimePerChromaMetaChunkNominal[k] = 0;
+ TimePerChromaMetaChunkVBlank[k] = 0;
+ TimePerChromaMetaChunkFlip[k] = 0;
+ }
+ }
+
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (GPUVMEnable == true) {
+ if (SourceScan[k] != dm_vert) {
+ dpte_group_width_luma = dpte_group_bytes[k] / PTERequestSizeY[k] * PixelPTEReqWidthY[k];
+ } else {
+ dpte_group_width_luma = dpte_group_bytes[k] / PTERequestSizeY[k] * PixelPTEReqHeightY[k];
+ }
+ dpte_groups_per_row_luma_ub = dml_ceil(1.0 * dpte_row_width_luma_ub[k] / dpte_group_width_luma, 1);
+ time_per_pte_group_nom_luma[k] = DST_Y_PER_PTE_ROW_NOM_L[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_luma_ub;
+ time_per_pte_group_vblank_luma[k] = DestinationLinesToRequestRowInVBlank[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_luma_ub;
+ time_per_pte_group_flip_luma[k] = DestinationLinesToRequestRowInImmediateFlip[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_luma_ub;
+ if (BytePerPixelC[k] == 0) {
+ time_per_pte_group_nom_chroma[k] = 0;
+ time_per_pte_group_vblank_chroma[k] = 0;
+ time_per_pte_group_flip_chroma[k] = 0;
+ } else {
+ if (SourceScan[k] != dm_vert) {
+ dpte_group_width_chroma = dpte_group_bytes[k] / PTERequestSizeC[k] * PixelPTEReqWidthC[k];
+ } else {
+ dpte_group_width_chroma = dpte_group_bytes[k] / PTERequestSizeC[k] * PixelPTEReqHeightC[k];
+ }
+ dpte_groups_per_row_chroma_ub = dml_ceil(1.0 * dpte_row_width_chroma_ub[k] / dpte_group_width_chroma, 1);
+ time_per_pte_group_nom_chroma[k] = DST_Y_PER_PTE_ROW_NOM_C[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_chroma_ub;
+ time_per_pte_group_vblank_chroma[k] = DestinationLinesToRequestRowInVBlank[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_chroma_ub;
+ time_per_pte_group_flip_chroma[k] = DestinationLinesToRequestRowInImmediateFlip[k] * HTotal[k] / PixelClock[k] / dpte_groups_per_row_chroma_ub;
+ }
+ } else {
+ time_per_pte_group_nom_luma[k] = 0;
+ time_per_pte_group_vblank_luma[k] = 0;
+ time_per_pte_group_flip_luma[k] = 0;
+ time_per_pte_group_nom_chroma[k] = 0;
+ time_per_pte_group_vblank_chroma[k] = 0;
+ time_per_pte_group_flip_chroma[k] = 0;
+ }
+ }
+}
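+
+/*
+ * Hypothetical example: for meta_row_height = 16 lines, VRatio = 1,
+ * HTotal = 2200, PixelClock = 148.5 MHz and meta_chunks_per_row_ub = 4,
+ * TimePerMetaChunkNominal works out to 16 / 1 * 2200 / 148.5 / 4 ~= 59.3 us,
+ * i.e. the nominal time budget available for fetching one metadata chunk of
+ * the row. The PTE group times follow the same pattern with
+ * dpte_groups_per_row_*_ub as the divisor.
+ */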
+
+static void CalculateVMGroupAndRequestTimes(
+ unsigned int NumberOfActivePlanes,
+ bool GPUVMEnable,
+ unsigned int GPUVMMaxPageTableLevels,
+ unsigned int HTotal[],
+ int BytePerPixelC[],
+ double DestinationLinesToRequestVMInVBlank[],
+ double DestinationLinesToRequestVMInImmediateFlip[],
+ bool DCCEnable[],
+ double PixelClock[],
+ int dpte_row_width_luma_ub[],
+ int dpte_row_width_chroma_ub[],
+ int vm_group_bytes[],
+ unsigned int dpde0_bytes_per_frame_ub_l[],
+ unsigned int dpde0_bytes_per_frame_ub_c[],
+ int meta_pte_bytes_per_frame_ub_l[],
+ int meta_pte_bytes_per_frame_ub_c[],
+ double TimePerVMGroupVBlank[],
+ double TimePerVMGroupFlip[],
+ double TimePerVMRequestVBlank[],
+ double TimePerVMRequestFlip[])
+{
+ int num_group_per_lower_vm_stage = 0;
+ int num_req_per_lower_vm_stage = 0;
+ unsigned int k;
+
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (GPUVMEnable == true && (DCCEnable[k] == true || GPUVMMaxPageTableLevels > 1)) {
+ if (DCCEnable[k] == false) {
+ if (BytePerPixelC[k] > 0) {
+ num_group_per_lower_vm_stage = dml_ceil((double) (dpde0_bytes_per_frame_ub_l[k])
+ / (double) (vm_group_bytes[k]), 1) + dml_ceil((double) (dpde0_bytes_per_frame_ub_c[k])
+ / (double) (vm_group_bytes[k]), 1);
+ } else {
+ num_group_per_lower_vm_stage = dml_ceil((double) (dpde0_bytes_per_frame_ub_l[k])
+ / (double) (vm_group_bytes[k]), 1);
+ }
+ } else {
+ if (GPUVMMaxPageTableLevels == 1) {
+ if (BytePerPixelC[k] > 0) {
+ num_group_per_lower_vm_stage = dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k])
+ / (double) (vm_group_bytes[k]), 1) + dml_ceil((double) (meta_pte_bytes_per_frame_ub_c[k])
+ / (double) (vm_group_bytes[k]), 1);
+ } else {
+ num_group_per_lower_vm_stage = dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k])
+ / (double) (vm_group_bytes[k]), 1);
+ }
+ } else {
+ if (BytePerPixelC[k] > 0) {
+ num_group_per_lower_vm_stage = 2 + dml_ceil((double) (dpde0_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ + dml_ceil((double) (dpde0_bytes_per_frame_ub_c[k]) / (double) (vm_group_bytes[k]), 1)
+ + dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ + dml_ceil((double) (meta_pte_bytes_per_frame_ub_c[k]) / (double) (vm_group_bytes[k]), 1);
+ } else {
+ num_group_per_lower_vm_stage = 1 + dml_ceil((double) (dpde0_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1)
+ + dml_ceil((double) (meta_pte_bytes_per_frame_ub_l[k]) / (double) (vm_group_bytes[k]), 1);
+ }
+ }
+ }
+
+ if (DCCEnable[k] == false) {
+ if (BytePerPixelC[k] > 0) {
+ num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k] / 64 + dpde0_bytes_per_frame_ub_c[k] / 64;
+ } else {
+ num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k] / 64;
+ }
+ } else {
+ if (GPUVMMaxPageTableLevels == 1) {
+ if (BytePerPixelC[k] > 0) {
+ num_req_per_lower_vm_stage = meta_pte_bytes_per_frame_ub_l[k] / 64
+ + meta_pte_bytes_per_frame_ub_c[k] / 64;
+ } else {
+ num_req_per_lower_vm_stage = meta_pte_bytes_per_frame_ub_l[k] / 64;
+ }
+ } else {
+ if (BytePerPixelC[k] > 0) {
+ num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k] / 64
+ + dpde0_bytes_per_frame_ub_c[k] / 64 + meta_pte_bytes_per_frame_ub_l[k]
+ / 64 + meta_pte_bytes_per_frame_ub_c[k] / 64;
+ } else {
+ num_req_per_lower_vm_stage = dpde0_bytes_per_frame_ub_l[k] / 64
+ + meta_pte_bytes_per_frame_ub_l[k] / 64;
+ }
+ }
+ }
+
+ TimePerVMGroupVBlank[k] = DestinationLinesToRequestVMInVBlank[k] * HTotal[k] / PixelClock[k]
+ / num_group_per_lower_vm_stage;
+ TimePerVMGroupFlip[k] = DestinationLinesToRequestVMInImmediateFlip[k] * HTotal[k] / PixelClock[k]
+ / num_group_per_lower_vm_stage;
+ TimePerVMRequestVBlank[k] = DestinationLinesToRequestVMInVBlank[k] * HTotal[k] / PixelClock[k]
+ / num_req_per_lower_vm_stage;
+ TimePerVMRequestFlip[k] = DestinationLinesToRequestVMInImmediateFlip[k] * HTotal[k] / PixelClock[k]
+ / num_req_per_lower_vm_stage;
+
+ if (GPUVMMaxPageTableLevels > 2) {
+ TimePerVMGroupVBlank[k] = TimePerVMGroupVBlank[k] / 2;
+ TimePerVMGroupFlip[k] = TimePerVMGroupFlip[k] / 2;
+ TimePerVMRequestVBlank[k] = TimePerVMRequestVBlank[k] / 2;
+ TimePerVMRequestFlip[k] = TimePerVMRequestFlip[k] / 2;
+ }
+
+ } else {
+ TimePerVMGroupVBlank[k] = 0;
+ TimePerVMGroupFlip[k] = 0;
+ TimePerVMRequestVBlank[k] = 0;
+ TimePerVMRequestFlip[k] = 0;
+ }
+ }
+}
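+
+/*
+ * Hypothetical example: with GPUVM enabled, two page table levels, DCC off
+ * and no chroma surface, dpde0_bytes_per_frame_ub_l = 2048 and
+ * vm_group_bytes = 512 give ceil(2048 / 512) = 4 groups and 2048 / 64 = 32
+ * requests, and the VBlank/flip line budgets above are divided by those
+ * counts to obtain the per-group and per-request times.
+ */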
+
+static void CalculateStutterEfficiency(
+ int NumberOfActivePlanes,
+ long ROBBufferSizeInKByte,
+ double TotalDataReadBandwidth,
+ double DCFCLK,
+ double ReturnBW,
+ double SRExitTime,
+ bool SynchronizedVBlank,
+ int DPPPerPlane[],
+ double DETBufferSizeY[],
+ int BytePerPixelY[],
+ double BytePerPixelDETY[],
+ double SwathWidthY[],
+ int SwathHeightY[],
+ int SwathHeightC[],
+ double DCCRateLuma[],
+ double DCCRateChroma[],
+ int HTotal[],
+ int VTotal[],
+ double PixelClock[],
+ double VRatio[],
+ enum scan_direction_class SourceScan[],
+ int BlockHeight256BytesY[],
+ int BlockWidth256BytesY[],
+ int BlockHeight256BytesC[],
+ int BlockWidth256BytesC[],
+ int DCCYMaxUncompressedBlock[],
+ int DCCCMaxUncompressedBlock[],
+ int VActive[],
+ bool DCCEnable[],
+ bool WritebackEnable[],
+ double ReadBandwidthPlaneLuma[],
+ double ReadBandwidthPlaneChroma[],
+ double meta_row_bw[],
+ double dpte_row_bw[],
+ double *StutterEfficiencyNotIncludingVBlank,
+ double *StutterEfficiency)
+{
+ double FullDETBufferingTimeY[DC__NUM_DPP__MAX] = { 0 };
+ double FrameTimeForMinFullDETBufferingTime = 0;
+ double StutterPeriod = 0;
+ double AverageReadBandwidth = 0;
+ double TotalRowReadBandwidth = 0;
+ double AverageDCCCompressionRate = 0;
+ double PartOfBurstThatFitsInROB = 0;
+ double StutterBurstTime = 0;
+ int TotalActiveWriteback = 0;
+ double VBlankTime = 0;
+ double SmallestVBlank = 0;
+ int BytePerPixelYCriticalPlane = 0;
+ double SwathWidthYCriticalPlane = 0;
+ double LinesInDETY[DC__NUM_DPP__MAX] = { 0 };
+ double LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX] = { 0 };
+ double LinesToFinishSwathTransferStutterCriticalPlane = 0;
+ double MaximumEffectiveCompressionLuma = 0;
+ double MaximumEffectiveCompressionChroma = 0;
+ unsigned int k;
+
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ LinesInDETY[k] = DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k];
+ LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
+ FullDETBufferingTimeY[k] = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ }
+
+ StutterPeriod = FullDETBufferingTimeY[0];
+ FrameTimeForMinFullDETBufferingTime = VTotal[0] * HTotal[0] / PixelClock[0];
+ BytePerPixelYCriticalPlane = BytePerPixelY[0];
+ SwathWidthYCriticalPlane = SwathWidthY[0];
+ LinesToFinishSwathTransferStutterCriticalPlane = SwathHeightY[0]
+ - (LinesInDETY[0] - LinesInDETYRoundedDownToSwath[0]);
+
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (FullDETBufferingTimeY[k] < StutterPeriod) {
+ StutterPeriod = FullDETBufferingTimeY[k];
+ FrameTimeForMinFullDETBufferingTime = VTotal[k] * HTotal[k] / PixelClock[k];
+ BytePerPixelYCriticalPlane = BytePerPixelY[k];
+ SwathWidthYCriticalPlane = SwathWidthY[k];
+ LinesToFinishSwathTransferStutterCriticalPlane = SwathHeightY[k]
+ - (LinesInDETY[k] - LinesInDETYRoundedDownToSwath[k]);
+ }
+ }
+
+ AverageReadBandwidth = 0;
+ TotalRowReadBandwidth = 0;
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (DCCEnable[k] == true) {
+ if ((SourceScan[k] == dm_vert && BlockWidth256BytesY[k] > SwathHeightY[k])
+ || (SourceScan[k] != dm_vert
+ && BlockHeight256BytesY[k] > SwathHeightY[k])
+ || DCCYMaxUncompressedBlock[k] < 256) {
+ MaximumEffectiveCompressionLuma = 2;
+ } else {
+ MaximumEffectiveCompressionLuma = 4;
+ }
+ AverageReadBandwidth = AverageReadBandwidth + ReadBandwidthPlaneLuma[k] / dml_min(DCCRateLuma[k], MaximumEffectiveCompressionLuma);
+
+ if (ReadBandwidthPlaneChroma[k] > 0) {
+ if ((SourceScan[k] == dm_vert && BlockWidth256BytesC[k] > SwathHeightC[k])
+ || (SourceScan[k] != dm_vert && BlockHeight256BytesC[k] > SwathHeightC[k])
+ || DCCCMaxUncompressedBlock[k] < 256) {
+ MaximumEffectiveCompressionChroma = 2;
+ } else {
+ MaximumEffectiveCompressionChroma = 4;
+ }
+ AverageReadBandwidth = AverageReadBandwidth + ReadBandwidthPlaneChroma[k] / dml_min(DCCRateChroma[k], MaximumEffectiveCompressionChroma);
+ }
+ } else {
+ AverageReadBandwidth = AverageReadBandwidth + ReadBandwidthPlaneLuma[k] + ReadBandwidthPlaneChroma[k];
+ }
+ TotalRowReadBandwidth = TotalRowReadBandwidth + DPPPerPlane[k] * (meta_row_bw[k] + dpte_row_bw[k]);
+ }
+
+ AverageDCCCompressionRate = TotalDataReadBandwidth / AverageReadBandwidth;
+ PartOfBurstThatFitsInROB = dml_min(StutterPeriod * TotalDataReadBandwidth, ROBBufferSizeInKByte * 1024 * AverageDCCCompressionRate);
+ StutterBurstTime = PartOfBurstThatFitsInROB / AverageDCCCompressionRate / ReturnBW + (StutterPeriod * TotalDataReadBandwidth
+ - PartOfBurstThatFitsInROB) / (DCFCLK * 64) + StutterPeriod * TotalRowReadBandwidth / ReturnBW;
+ StutterBurstTime = dml_max(StutterBurstTime, LinesToFinishSwathTransferStutterCriticalPlane * BytePerPixelYCriticalPlane * SwathWidthYCriticalPlane / ReturnBW);
+
+ TotalActiveWriteback = 0;
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (WritebackEnable[k] == true) {
+ TotalActiveWriteback = TotalActiveWriteback + 1;
+ }
+ }
+
+ if (TotalActiveWriteback == 0) {
+ *StutterEfficiencyNotIncludingVBlank = (1
+ - (SRExitTime + StutterBurstTime) / StutterPeriod) * 100;
+ } else {
+ *StutterEfficiencyNotIncludingVBlank = 0;
+ }
+
+ if (SynchronizedVBlank == true || NumberOfActivePlanes == 1) {
+ SmallestVBlank = (VTotal[0] - VActive[0]) * HTotal[0] / PixelClock[0];
+ } else {
+ SmallestVBlank = 0;
+ }
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if (SynchronizedVBlank == true || NumberOfActivePlanes == 1) {
+ VBlankTime = (VTotal[k] - VActive[k]) * HTotal[k] / PixelClock[k];
+ } else {
+ VBlankTime = 0;
+ }
+ SmallestVBlank = dml_min(SmallestVBlank, VBlankTime);
+ }
+
+ *StutterEfficiency = (*StutterEfficiencyNotIncludingVBlank / 100.0 * (FrameTimeForMinFullDETBufferingTime - SmallestVBlank) + SmallestVBlank) / FrameTimeForMinFullDETBufferingTime * 100;
+}
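+
+/*
+ * Hypothetical example: with no active writeback, StutterPeriod = 500 us,
+ * SRExitTime = 10 us and StutterBurstTime = 40 us, the vblank-excluded
+ * efficiency above is (1 - (10 + 40) / 500) * 100 = 90%. The final
+ * *StutterEfficiency then additionally credits the smallest vblank interval
+ * relative to the frame time of the stutter-critical plane.
+ */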
+
+static void CalculateSwathAndDETConfiguration(
+ bool ForceSingleDPP,
+ int NumberOfActivePlanes,
+ long DETBufferSizeInKByte,
+ double MaximumSwathWidthLuma[],
+ double MaximumSwathWidthChroma[],
+ enum scan_direction_class SourceScan[],
+ enum source_format_class SourcePixelFormat[],
+ enum dm_swizzle_mode SurfaceTiling[],
+ int ViewportWidth[],
+ int ViewportHeight[],
+ int SurfaceWidthY[],
+ int SurfaceWidthC[],
+ int SurfaceHeightY[],
+ int SurfaceHeightC[],
+ int Read256BytesBlockHeightY[],
+ int Read256BytesBlockHeightC[],
+ int Read256BytesBlockWidthY[],
+ int Read256BytesBlockWidthC[],
+ enum odm_combine_mode ODMCombineEnabled[],
+ int BlendingAndTiming[],
+ int BytePerPixY[],
+ int BytePerPixC[],
+ double BytePerPixDETY[],
+ double BytePerPixDETC[],
+ int HActive[],
+ double HRatio[],
+ double HRatioChroma[],
+ int DPPPerPlane[],
+ int swath_width_luma_ub[],
+ int swath_width_chroma_ub[],
+ double SwathWidth[],
+ double SwathWidthChroma[],
+ int SwathHeightY[],
+ int SwathHeightC[],
+ double DETBufferSizeY[],
+ double DETBufferSizeC[],
+ bool ViewportSizeSupportPerPlane[],
+ bool *ViewportSizeSupport)
+{
+ int MaximumSwathHeightY[DC__NUM_DPP__MAX] = { 0 };
+ int MaximumSwathHeightC[DC__NUM_DPP__MAX] = { 0 };
+ int MinimumSwathHeightY = 0;
+ int MinimumSwathHeightC = 0;
+ long RoundedUpMaxSwathSizeBytesY = 0;
+ long RoundedUpMaxSwathSizeBytesC = 0;
+ long RoundedUpMinSwathSizeBytesY = 0;
+ long RoundedUpMinSwathSizeBytesC = 0;
+ long RoundedUpSwathSizeBytesY = 0;
+ long RoundedUpSwathSizeBytesC = 0;
+ double SwathWidthSingleDPP[DC__NUM_DPP__MAX] = { 0 };
+ double SwathWidthSingleDPPChroma[DC__NUM_DPP__MAX] = { 0 };
+ int k;
+
+ CalculateSwathWidth(
+ ForceSingleDPP,
+ NumberOfActivePlanes,
+ SourcePixelFormat,
+ SourceScan,
+ ViewportWidth,
+ ViewportHeight,
+ SurfaceWidthY,
+ SurfaceWidthC,
+ SurfaceHeightY,
+ SurfaceHeightC,
+ ODMCombineEnabled,
+ BytePerPixY,
+ BytePerPixC,
+ Read256BytesBlockHeightY,
+ Read256BytesBlockHeightC,
+ Read256BytesBlockWidthY,
+ Read256BytesBlockWidthC,
+ BlendingAndTiming,
+ HActive,
+ HRatio,
+ DPPPerPlane,
+ SwathWidthSingleDPP,
+ SwathWidthSingleDPPChroma,
+ SwathWidth,
+ SwathWidthChroma,
+ MaximumSwathHeightY,
+ MaximumSwathHeightC,
+ swath_width_luma_ub,
+ swath_width_chroma_ub);
+
+ *ViewportSizeSupport = true;
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ if ((SourcePixelFormat[k] == dm_444_64 || SourcePixelFormat[k] == dm_444_32
+ || SourcePixelFormat[k] == dm_444_16
+ || SourcePixelFormat[k] == dm_mono_16
+ || SourcePixelFormat[k] == dm_mono_8
+ || SourcePixelFormat[k] == dm_rgbe)) {
+ if (SurfaceTiling[k] == dm_sw_linear
+ || (SourcePixelFormat[k] == dm_444_64
+ && (SurfaceTiling[k] == dm_sw_64kb_s || SurfaceTiling[k] == dm_sw_64kb_s_t || SurfaceTiling[k] == dm_sw_64kb_s_x)
+ && SourceScan[k] != dm_vert)) {
+ MinimumSwathHeightY = MaximumSwathHeightY[k];
+ } else if (SourcePixelFormat[k] == dm_444_8 && SourceScan[k] == dm_vert) {
+ MinimumSwathHeightY = MaximumSwathHeightY[k];
+ } else {
+ MinimumSwathHeightY = MaximumSwathHeightY[k] / 2;
+ }
+ MinimumSwathHeightC = MaximumSwathHeightC[k];
+ } else {
+ if (SurfaceTiling[k] == dm_sw_linear) {
+ MinimumSwathHeightY = MaximumSwathHeightY[k];
+ MinimumSwathHeightC = MaximumSwathHeightC[k];
+ } else if (SourcePixelFormat[k] == dm_rgbe_alpha
+ && SourceScan[k] == dm_vert) {
+ MinimumSwathHeightY = MaximumSwathHeightY[k] / 2;
+ MinimumSwathHeightC = MaximumSwathHeightC[k];
+ } else if (SourcePixelFormat[k] == dm_rgbe_alpha) {
+ MinimumSwathHeightY = MaximumSwathHeightY[k] / 2;
+ MinimumSwathHeightC = MaximumSwathHeightC[k] / 2;
+ } else if (SourcePixelFormat[k] == dm_420_8 && SourceScan[k] == dm_vert) {
+ MinimumSwathHeightY = MaximumSwathHeightY[k];
+ MinimumSwathHeightC = MaximumSwathHeightC[k] / 2;
+ } else {
+ MinimumSwathHeightC = MaximumSwathHeightC[k] / 2;
+ MinimumSwathHeightY = MaximumSwathHeightY[k] / 2;
+ }
+ }
+
+ RoundedUpMaxSwathSizeBytesY = swath_width_luma_ub[k] * BytePerPixDETY[k]
+ * MaximumSwathHeightY[k];
+ RoundedUpMinSwathSizeBytesY = swath_width_luma_ub[k] * BytePerPixDETY[k]
+ * MinimumSwathHeightY;
+ if (SourcePixelFormat[k] == dm_420_10) {
+ RoundedUpMaxSwathSizeBytesY = dml_ceil((double) RoundedUpMaxSwathSizeBytesY, 256);
+ RoundedUpMinSwathSizeBytesY = dml_ceil((double) RoundedUpMinSwathSizeBytesY, 256);
+ }
+ RoundedUpMaxSwathSizeBytesC = swath_width_chroma_ub[k] * BytePerPixDETC[k]
+ * MaximumSwathHeightC[k];
+ RoundedUpMinSwathSizeBytesC = swath_width_chroma_ub[k] * BytePerPixDETC[k]
+ * MinimumSwathHeightC;
+ if (SourcePixelFormat[k] == dm_420_10) {
+ RoundedUpMaxSwathSizeBytesC = dml_ceil(RoundedUpMaxSwathSizeBytesC, 256);
+ RoundedUpMinSwathSizeBytesC = dml_ceil(RoundedUpMinSwathSizeBytesC, 256);
+ }
+
+ if (RoundedUpMaxSwathSizeBytesY + RoundedUpMaxSwathSizeBytesC
+ <= DETBufferSizeInKByte * 1024 / 2) {
+ SwathHeightY[k] = MaximumSwathHeightY[k];
+ SwathHeightC[k] = MaximumSwathHeightC[k];
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY;
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC;
+ } else if (RoundedUpMaxSwathSizeBytesY >= 1.5 * RoundedUpMaxSwathSizeBytesC
+ && RoundedUpMinSwathSizeBytesY + RoundedUpMaxSwathSizeBytesC
+ <= DETBufferSizeInKByte * 1024 / 2) {
+ SwathHeightY[k] = MinimumSwathHeightY;
+ SwathHeightC[k] = MaximumSwathHeightC[k];
+ RoundedUpSwathSizeBytesY = RoundedUpMinSwathSizeBytesY;
+ RoundedUpSwathSizeBytesC = RoundedUpMaxSwathSizeBytesC;
+ } else if (RoundedUpMaxSwathSizeBytesY < 1.5 * RoundedUpMaxSwathSizeBytesC
+ && RoundedUpMaxSwathSizeBytesY + RoundedUpMinSwathSizeBytesC
+ <= DETBufferSizeInKByte * 1024 / 2) {
+ SwathHeightY[k] = MaximumSwathHeightY[k];
+ SwathHeightC[k] = MinimumSwathHeightC;
+ RoundedUpSwathSizeBytesY = RoundedUpMaxSwathSizeBytesY;
+ RoundedUpSwathSizeBytesC = RoundedUpMinSwathSizeBytesC;
+ } else {
+ SwathHeightY[k] = MinimumSwathHeightY;
+ SwathHeightC[k] = MinimumSwathHeightC;
+ RoundedUpSwathSizeBytesY = RoundedUpMinSwathSizeBytesY;
+ RoundedUpSwathSizeBytesC = RoundedUpMinSwathSizeBytesC;
+ }
+
+ if (SwathHeightC[k] == 0) {
+ DETBufferSizeY[k] = DETBufferSizeInKByte * 1024;
+ DETBufferSizeC[k] = 0;
+ } else if (RoundedUpSwathSizeBytesY <= 1.5 * RoundedUpSwathSizeBytesC) {
+ DETBufferSizeY[k] = DETBufferSizeInKByte * 1024 / 2;
+ DETBufferSizeC[k] = DETBufferSizeInKByte * 1024 / 2;
+ } else {
+ DETBufferSizeY[k] = DETBufferSizeInKByte * 1024 * 2 / 3;
+ DETBufferSizeC[k] = DETBufferSizeInKByte * 1024 / 3;
+ }
+
+ if (RoundedUpMinSwathSizeBytesY + RoundedUpMinSwathSizeBytesC
+ > DETBufferSizeInKByte * 1024 / 2
+ || SwathWidth[k] > MaximumSwathWidthLuma[k]
+ || (SwathHeightC[k] > 0
+ && SwathWidthChroma[k] > MaximumSwathWidthChroma[k])) {
+ *ViewportSizeSupport = false;
+ ViewportSizeSupportPerPlane[k] = false;
+ } else {
+ ViewportSizeSupportPerPlane[k] = true;
+ }
+ }
+}
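+
+/*
+ * Hypothetical example of the DET split: for DETBufferSizeInKByte = 164 and
+ * a plane whose rounded-up luma swath is no more than 1.5x the chroma swath,
+ * the DET is split evenly into 82 KiB luma / 82 KiB chroma; otherwise it is
+ * split 2/3 luma (~109 KiB) to 1/3 chroma (~55 KiB). A plane with no chroma
+ * swath keeps the whole 164 KiB for luma.
+ */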
+
+static void CalculateSwathWidth(
+ bool ForceSingleDPP,
+ int NumberOfActivePlanes,
+ enum source_format_class SourcePixelFormat[],
+ enum scan_direction_class SourceScan[],
+ unsigned int ViewportWidth[],
+ unsigned int ViewportHeight[],
+ unsigned int SurfaceWidthY[],
+ unsigned int SurfaceWidthC[],
+ unsigned int SurfaceHeightY[],
+ unsigned int SurfaceHeightC[],
+ enum odm_combine_mode ODMCombineEnabled[],
+ int BytePerPixY[],
+ int BytePerPixC[],
+ int Read256BytesBlockHeightY[],
+ int Read256BytesBlockHeightC[],
+ int Read256BytesBlockWidthY[],
+ int Read256BytesBlockWidthC[],
+ int BlendingAndTiming[],
+ unsigned int HActive[],
+ double HRatio[],
+ int DPPPerPlane[],
+ double SwathWidthSingleDPPY[],
+ double SwathWidthSingleDPPC[],
+ double SwathWidthY[],
+ double SwathWidthC[],
+ int MaximumSwathHeightY[],
+ int MaximumSwathHeightC[],
+ unsigned int swath_width_luma_ub[],
+ unsigned int swath_width_chroma_ub[])
+{
+ unsigned int k, j;
+ long surface_width_ub_l;
+ long surface_height_ub_l;
+ long surface_width_ub_c;
+ long surface_height_ub_c;
+
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ enum odm_combine_mode MainPlaneODMCombine = 0;
+
+ if (SourceScan[k] != dm_vert) {
+ SwathWidthSingleDPPY[k] = ViewportWidth[k];
+ } else {
+ SwathWidthSingleDPPY[k] = ViewportHeight[k];
+ }
+
+ MainPlaneODMCombine = ODMCombineEnabled[k];
+ for (j = 0; j < NumberOfActivePlanes; ++j) {
+ if (BlendingAndTiming[k] == j) {
+ MainPlaneODMCombine = ODMCombineEnabled[j];
+ }
+ }
+
+ if (MainPlaneODMCombine == dm_odm_combine_mode_4to1) {
+ SwathWidthY[k] = dml_min(SwathWidthSingleDPPY[k], dml_round(HActive[k] / 4.0 * HRatio[k]));
+ } else if (MainPlaneODMCombine == dm_odm_combine_mode_2to1) {
+ SwathWidthY[k] = dml_min(SwathWidthSingleDPPY[k], dml_round(HActive[k] / 2.0 * HRatio[k]));
+ } else if (DPPPerPlane[k] == 2) {
+ SwathWidthY[k] = SwathWidthSingleDPPY[k] / 2;
+ } else {
+ SwathWidthY[k] = SwathWidthSingleDPPY[k];
+ }
+
+ if (SourcePixelFormat[k] == dm_420_8 || SourcePixelFormat[k] == dm_420_10 || SourcePixelFormat[k] == dm_420_12) {
+ SwathWidthC[k] = SwathWidthY[k] / 2;
+ SwathWidthSingleDPPC[k] = SwathWidthSingleDPPY[k] / 2;
+ } else {
+ SwathWidthC[k] = SwathWidthY[k];
+ SwathWidthSingleDPPC[k] = SwathWidthSingleDPPY[k];
+ }
+
+ if (ForceSingleDPP == true) {
+ SwathWidthY[k] = SwathWidthSingleDPPY[k];
+ SwathWidthC[k] = SwathWidthSingleDPPC[k];
+ }
+
+ surface_width_ub_l = dml_ceil(SurfaceWidthY[k], Read256BytesBlockWidthY[k]);
+ surface_height_ub_l = dml_ceil(SurfaceHeightY[k], Read256BytesBlockHeightY[k]);
+ surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
+ surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
+
+ if (SourceScan[k] != dm_vert) {
+ MaximumSwathHeightY[k] = Read256BytesBlockHeightY[k];
+ MaximumSwathHeightC[k] = Read256BytesBlockHeightC[k];
+ swath_width_luma_ub[k] = dml_min(surface_width_ub_l, (long) dml_ceil(SwathWidthY[k] - 1,
+ Read256BytesBlockWidthY[k]) + Read256BytesBlockWidthY[k]);
+ if (BytePerPixC[k] > 0) {
+ swath_width_chroma_ub[k] = dml_min(surface_width_ub_c, (long) dml_ceil(SwathWidthC[k] - 1,
+ Read256BytesBlockWidthC[k]) + Read256BytesBlockWidthC[k]);
+ } else {
+ swath_width_chroma_ub[k] = 0;
+ }
+ } else {
+ MaximumSwathHeightY[k] = Read256BytesBlockWidthY[k];
+ MaximumSwathHeightC[k] = Read256BytesBlockWidthC[k];
+ swath_width_luma_ub[k] = dml_min(surface_height_ub_l, (long) dml_ceil(SwathWidthY[k] - 1,
+ Read256BytesBlockHeightY[k]) + Read256BytesBlockHeightY[k]);
+ if (BytePerPixC[k] > 0) {
+ swath_width_chroma_ub[k] = dml_min(surface_height_ub_c, (long) dml_ceil(SwathWidthC[k] - 1,
+ Read256BytesBlockHeightC[k]) + Read256BytesBlockHeightC[k]);
+ } else {
+ swath_width_chroma_ub[k] = 0;
+ }
+ }
+ }
+}
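+
+/*
+ * Hypothetical example: for a horizontally scanned 3840-wide viewport with
+ * HRatio = 1, 2:1 ODM combine limits each pipe's luma swath to
+ * min(3840, round(3840 / 2.0 * 1)) = 1920 pixels; the *_ub outputs are the
+ * block-aligned versions of these widths, clamped to the block-aligned
+ * surface size.
+ */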
+
+static double CalculateExtraLatency(
+ long RoundTripPingLatencyCycles,
+ long ReorderingBytes,
+ double DCFCLK,
+ int TotalNumberOfActiveDPP,
+ int PixelChunkSizeInKByte,
+ int TotalNumberOfDCCActiveDPP,
+ int MetaChunkSize,
+ double ReturnBW,
+ bool GPUVMEnable,
+ bool HostVMEnable,
+ int NumberOfActivePlanes,
+ int NumberOfDPP[],
+ int dpte_group_bytes[],
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ double HostVMMinPageSize,
+ int HostVMMaxNonCachedPageTableLevels)
+{
+ double ExtraLatencyBytes = 0;
+ ExtraLatencyBytes = CalculateExtraLatencyBytes(
+ ReorderingBytes,
+ TotalNumberOfActiveDPP,
+ PixelChunkSizeInKByte,
+ TotalNumberOfDCCActiveDPP,
+ MetaChunkSize,
+ GPUVMEnable,
+ HostVMEnable,
+ NumberOfActivePlanes,
+ NumberOfDPP,
+ dpte_group_bytes,
+ PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ HostVMMinPageSize,
+ HostVMMaxNonCachedPageTableLevels);
+
+ return (RoundTripPingLatencyCycles + 32) / DCFCLK + ExtraLatencyBytes / ReturnBW;
+}
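+
+/*
+ * Hypothetical example: with RoundTripPingLatencyCycles = 128 and
+ * DCFCLK = 800 MHz, the fixed component above is (128 + 32) / 800 = 0.2 us,
+ * to which the byte-based component ExtraLatencyBytes / ReturnBW is added.
+ */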
+
+static double CalculateExtraLatencyBytes(
+ long ReorderingBytes,
+ int TotalNumberOfActiveDPP,
+ int PixelChunkSizeInKByte,
+ int TotalNumberOfDCCActiveDPP,
+ int MetaChunkSize,
+ bool GPUVMEnable,
+ bool HostVMEnable,
+ int NumberOfActivePlanes,
+ int NumberOfDPP[],
+ int dpte_group_bytes[],
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ double HostVMMinPageSize,
+ int HostVMMaxNonCachedPageTableLevels)
+{
+ double ret = 0;
+ double HostVMInefficiencyFactor = 0;
+ int HostVMDynamicLevels = 0;
+ unsigned int k;
+
+ if (GPUVMEnable == true && HostVMEnable == true) {
+ HostVMInefficiencyFactor = PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly;
+ if (HostVMMinPageSize < 2048) {
+ HostVMDynamicLevels = HostVMMaxNonCachedPageTableLevels;
+ } else if (HostVMMinPageSize >= 2048 && HostVMMinPageSize < 1048576) {
+ HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 1);
+ } else {
+ HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 2);
+ }
+ } else {
+ HostVMInefficiencyFactor = 1;
+ HostVMDynamicLevels = 0;
+ }
+
+ ret = ReorderingBytes + (TotalNumberOfActiveDPP * PixelChunkSizeInKByte + TotalNumberOfDCCActiveDPP * MetaChunkSize) * 1024.0;
+
+ if (GPUVMEnable == true) {
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ ret = ret + NumberOfDPP[k] * dpte_group_bytes[k] * (1 + 8 * HostVMDynamicLevels) * HostVMInefficiencyFactor;
+ }
+ }
+ return ret;
+}
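+
+/*
+ * Hypothetical example: with both GPUVM and HostVM enabled and
+ * HostVMMinPageSize = 4096 (i.e. >= 2048 and < 1048576), HostVMDynamicLevels
+ * is HostVMMaxNonCachedPageTableLevels - 1, and each plane contributes
+ * NumberOfDPP[k] * dpte_group_bytes[k] scaled by
+ * (1 + 8 * HostVMDynamicLevels) times the host VM inefficiency factor.
+ */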
+
+
+static double CalculateUrgentLatency(
+ double UrgentLatencyPixelDataOnly,
+ double UrgentLatencyPixelMixedWithVMData,
+ double UrgentLatencyVMDataOnly,
+ bool DoUrgentLatencyAdjustment,
+ double UrgentLatencyAdjustmentFabricClockComponent,
+ double UrgentLatencyAdjustmentFabricClockReference,
+ double FabricClock)
+{
+ double ret;
+
+ ret = dml_max3(UrgentLatencyPixelDataOnly, UrgentLatencyPixelMixedWithVMData, UrgentLatencyVMDataOnly);
+ if (DoUrgentLatencyAdjustment == true) {
+ ret = ret + UrgentLatencyAdjustmentFabricClockComponent * (UrgentLatencyAdjustmentFabricClockReference / FabricClock - 1);
+ }
+ return ret;
+}
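+
+/*
+ * Hypothetical example: for per-class latencies of 4 / 4.5 / 4 us the max3
+ * above picks 4.5 us; with the adjustment enabled, a fabric clock component
+ * of 1 us, a 1000 MHz reference and FabricClock = 800 MHz add
+ * 1 * (1000 / 800 - 1) = 0.25 us, giving 4.75 us.
+ */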
+
+
+static void UseMinimumDCFCLK(
+ struct display_mode_lib *mode_lib,
+ int MaxInterDCNTileRepeaters,
+ int MaxPrefetchMode,
+ double FinalDRAMClockChangeLatency,
+ double SREnterPlusExitTime,
+ int ReturnBusWidth,
+ int RoundTripPingLatencyCycles,
+ int ReorderingBytes,
+ int PixelChunkSizeInKByte,
+ int MetaChunkSize,
+ bool GPUVMEnable,
+ int GPUVMMaxPageTableLevels,
+ bool HostVMEnable,
+ int NumberOfActivePlanes,
+ double HostVMMinPageSize,
+ int HostVMMaxNonCachedPageTableLevels,
+ bool DynamicMetadataVMEnabled,
+ enum immediate_flip_requirement ImmediateFlipRequirement,
+ bool ProgressiveToInterlaceUnitInOPP,
+ double MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly,
+ int VTotal[],
+ int VActive[],
+ int DynamicMetadataTransmittedBytes[],
+ int DynamicMetadataLinesBeforeActiveRequired[],
+ bool Interlace[],
+ double RequiredDPPCLK[][2][DC__NUM_DPP__MAX],
+ double RequiredDISPCLK[][2],
+ double UrgLatency[],
+ unsigned int NoOfDPP[][2][DC__NUM_DPP__MAX],
+ double ProjectedDCFCLKDeepSleep[][2],
+ double MaximumVStartup[][2][DC__NUM_DPP__MAX],
+ double TotalVActivePixelBandwidth[][2],
+ double TotalVActiveCursorBandwidth[][2],
+ double TotalMetaRowBandwidth[][2],
+ double TotalDPTERowBandwidth[][2],
+ unsigned int TotalNumberOfActiveDPP[][2],
+ unsigned int TotalNumberOfDCCActiveDPP[][2],
+ int dpte_group_bytes[],
+ double PrefetchLinesY[][2][DC__NUM_DPP__MAX],
+ double PrefetchLinesC[][2][DC__NUM_DPP__MAX],
+ int swath_width_luma_ub_all_states[][2][DC__NUM_DPP__MAX],
+ int swath_width_chroma_ub_all_states[][2][DC__NUM_DPP__MAX],
+ int BytePerPixelY[],
+ int BytePerPixelC[],
+ int HTotal[],
+ double PixelClock[],
+ double PDEAndMetaPTEBytesPerFrame[][2][DC__NUM_DPP__MAX],
+ double DPTEBytesPerRow[][2][DC__NUM_DPP__MAX],
+ double MetaRowBytes[][2][DC__NUM_DPP__MAX],
+ bool DynamicMetadataEnable[],
+ double VActivePixelBandwidth[][2][DC__NUM_DPP__MAX],
+ double VActiveCursorBandwidth[][2][DC__NUM_DPP__MAX],
+ double ReadBandwidthLuma[],
+ double ReadBandwidthChroma[],
+ double DCFCLKPerState[],
+ double DCFCLKState[][2])
+{
+ double NormalEfficiency = 0;
+ double PTEEfficiency = 0;
+ double TotalMaxPrefetchFlipDPTERowBandwidth[DC__VOLTAGE_STATES][2] = { { 0 } };
+ unsigned int i, j, k;
+
+ NormalEfficiency = (HostVMEnable == true ? PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData
+ : PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly) / 100.0;
+ PTEEfficiency = (HostVMEnable == true ? PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly
+ / PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData : 1.0);
+ for (i = 0; i < mode_lib->soc.num_states; ++i) {
+ for (j = 0; j <= 1; ++j) {
+ double PixelDCFCLKCyclesRequiredInPrefetch[DC__NUM_DPP__MAX] = { 0 };
+ double PrefetchPixelLinesTime[DC__NUM_DPP__MAX] = { 0 };
+ double DCFCLKRequiredForPeakBandwidthPerPlane[DC__NUM_DPP__MAX] = { 0 };
+ double DynamicMetadataVMExtraLatency[DC__NUM_DPP__MAX] = { 0 };
+ double MinimumTWait = 0;
+ double NonDPTEBandwidth = 0;
+ double DPTEBandwidth = 0;
+ double DCFCLKRequiredForAverageBandwidth = 0;
+ double ExtraLatencyBytes = 0;
+ double ExtraLatencyCycles = 0;
+ double DCFCLKRequiredForPeakBandwidth = 0;
+ int NoOfDPPState[DC__NUM_DPP__MAX] = { 0 };
+ double MinimumTvmPlus2Tr0 = 0;
+
+ TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = 0;
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = TotalMaxPrefetchFlipDPTERowBandwidth[i][j]
+ + NoOfDPP[i][j][k] * DPTEBytesPerRow[i][j][k] / (15.75 * HTotal[k] / PixelClock[k]);
+ }
+
+ for (k = 0; k <= NumberOfActivePlanes - 1; ++k) {
+ NoOfDPPState[k] = NoOfDPP[i][j][k];
+ }
+
+ MinimumTWait = CalculateTWait(MaxPrefetchMode, FinalDRAMClockChangeLatency, UrgLatency[i], SREnterPlusExitTime);
+ NonDPTEBandwidth = TotalVActivePixelBandwidth[i][j] + TotalVActiveCursorBandwidth[i][j] + TotalMetaRowBandwidth[i][j];
+ DPTEBandwidth = (HostVMEnable == true || ImmediateFlipRequirement == dm_immediate_flip_required) ?
+ TotalMaxPrefetchFlipDPTERowBandwidth[i][j] : TotalDPTERowBandwidth[i][j];
+ DCFCLKRequiredForAverageBandwidth = dml_max3(ProjectedDCFCLKDeepSleep[i][j],
+ (NonDPTEBandwidth + TotalDPTERowBandwidth[i][j]) / ReturnBusWidth / (MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation / 100),
+ (NonDPTEBandwidth + DPTEBandwidth / PTEEfficiency) / NormalEfficiency / ReturnBusWidth);
+
+ ExtraLatencyBytes = CalculateExtraLatencyBytes(ReorderingBytes, TotalNumberOfActiveDPP[i][j], PixelChunkSizeInKByte, TotalNumberOfDCCActiveDPP[i][j],
+ MetaChunkSize, GPUVMEnable, HostVMEnable, NumberOfActivePlanes, NoOfDPPState, dpte_group_bytes,
+ PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData, PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ HostVMMinPageSize, HostVMMaxNonCachedPageTableLevels);
+ ExtraLatencyCycles = RoundTripPingLatencyCycles + 32 + ExtraLatencyBytes / NormalEfficiency / ReturnBusWidth;
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ double DCFCLKCyclesRequiredInPrefetch = 0;
+ double ExpectedPrefetchBWAcceleration = 0;
+ double PrefetchTime = 0;
+
+ PixelDCFCLKCyclesRequiredInPrefetch[k] = (PrefetchLinesY[i][j][k] * swath_width_luma_ub_all_states[i][j][k] * BytePerPixelY[k]
+ + PrefetchLinesC[i][j][k] * swath_width_chroma_ub_all_states[i][j][k] * BytePerPixelC[k]) / NormalEfficiency / ReturnBusWidth;
+ DCFCLKCyclesRequiredInPrefetch = 2 * ExtraLatencyCycles / NoOfDPPState[k] + PDEAndMetaPTEBytesPerFrame[i][j][k] / PTEEfficiency
+ / NormalEfficiency / ReturnBusWidth * (GPUVMMaxPageTableLevels > 2 ? 1 : 0) + 2 * DPTEBytesPerRow[i][j][k] / PTEEfficiency
+ / NormalEfficiency / ReturnBusWidth + 2 * MetaRowBytes[i][j][k] / NormalEfficiency / ReturnBusWidth + PixelDCFCLKCyclesRequiredInPrefetch[k];
+ PrefetchPixelLinesTime[k] = dml_max(PrefetchLinesY[i][j][k], PrefetchLinesC[i][j][k]) * HTotal[k] / PixelClock[k];
+ ExpectedPrefetchBWAcceleration = (VActivePixelBandwidth[i][j][k] + VActiveCursorBandwidth[i][j][k]) / (ReadBandwidthLuma[k] + ReadBandwidthChroma[k]);
+ DynamicMetadataVMExtraLatency[k] = (GPUVMEnable == true && DynamicMetadataEnable[k] == true && DynamicMetadataVMEnabled == true) ?
+ UrgLatency[i] * GPUVMMaxPageTableLevels * (HostVMEnable == true ? HostVMMaxNonCachedPageTableLevels + 1 : 1) : 0;
+ PrefetchTime = (MaximumVStartup[i][j][k] - 1) * HTotal[k] / PixelClock[k] - MinimumTWait - UrgLatency[i] * ((GPUVMMaxPageTableLevels <= 2 ? GPUVMMaxPageTableLevels
+ : GPUVMMaxPageTableLevels - 2) * (HostVMEnable == true ? HostVMMaxNonCachedPageTableLevels + 1 : 1) - 1) - DynamicMetadataVMExtraLatency[k];
+
+ if (PrefetchTime > 0) {
+ double ExpectedVRatioPrefetch = 0;
+ ExpectedVRatioPrefetch = PrefetchPixelLinesTime[k] / (PrefetchTime * PixelDCFCLKCyclesRequiredInPrefetch[k] / DCFCLKCyclesRequiredInPrefetch);
+ DCFCLKRequiredForPeakBandwidthPerPlane[k] = NoOfDPPState[k] * PixelDCFCLKCyclesRequiredInPrefetch[k] / PrefetchPixelLinesTime[k]
+ * dml_max(1.0, ExpectedVRatioPrefetch) * dml_max(1.0, ExpectedVRatioPrefetch / 4) * ExpectedPrefetchBWAcceleration;
+ if (HostVMEnable == true || ImmediateFlipRequirement == dm_immediate_flip_required) {
+ DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKRequiredForPeakBandwidthPerPlane[k]
+ + NoOfDPPState[k] * DPTEBandwidth / PTEEfficiency / NormalEfficiency / ReturnBusWidth;
+ }
+ } else {
+ DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKPerState[i];
+ }
+ if (DynamicMetadataEnable[k] == true) {
+ double TsetupPipe = 0;
+ double TdmbfPipe = 0;
+ double TdmsksPipe = 0;
+ double TdmecPipe = 0;
+ double AllowedTimeForUrgentExtraLatency = 0;
+
+ CalculateDynamicMetadataParameters(
+ MaxInterDCNTileRepeaters,
+ RequiredDPPCLK[i][j][k],
+ RequiredDISPCLK[i][j],
+ ProjectedDCFCLKDeepSleep[i][j],
+ PixelClock[k],
+ HTotal[k],
+ VTotal[k] - VActive[k],
+ DynamicMetadataTransmittedBytes[k],
+ DynamicMetadataLinesBeforeActiveRequired[k],
+ Interlace[k],
+ ProgressiveToInterlaceUnitInOPP,
+ &TsetupPipe,
+ &TdmbfPipe,
+ &TdmecPipe,
+ &TdmsksPipe);
+ AllowedTimeForUrgentExtraLatency = MaximumVStartup[i][j][k] * HTotal[k] / PixelClock[k] - MinimumTWait - TsetupPipe
+ - TdmbfPipe - TdmecPipe - TdmsksPipe - DynamicMetadataVMExtraLatency[k];
+ if (AllowedTimeForUrgentExtraLatency > 0) {
+ DCFCLKRequiredForPeakBandwidthPerPlane[k] = dml_max(DCFCLKRequiredForPeakBandwidthPerPlane[k],
+ ExtraLatencyCycles / AllowedTimeForUrgentExtraLatency);
+ } else {
+ DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKPerState[i];
+ }
+ }
+ }
+ DCFCLKRequiredForPeakBandwidth = 0;
+ for (k = 0; k <= NumberOfActivePlanes - 1; ++k) {
+ DCFCLKRequiredForPeakBandwidth = DCFCLKRequiredForPeakBandwidth + DCFCLKRequiredForPeakBandwidthPerPlane[k];
+ }
+ MinimumTvmPlus2Tr0 = UrgLatency[i] * (GPUVMEnable == true ? (HostVMEnable == true ?
+ (GPUVMMaxPageTableLevels + 2) * (HostVMMaxNonCachedPageTableLevels + 1) - 1 : GPUVMMaxPageTableLevels + 1) : 0);
+ for (k = 0; k < NumberOfActivePlanes; ++k) {
+ double MaximumTvmPlus2Tr0PlusTsw = 0;
+ MaximumTvmPlus2Tr0PlusTsw = (MaximumVStartup[i][j][k] - 2) * HTotal[k] / PixelClock[k] - MinimumTWait - DynamicMetadataVMExtraLatency[k];
+ if (MaximumTvmPlus2Tr0PlusTsw <= MinimumTvmPlus2Tr0 + PrefetchPixelLinesTime[k] / 4) {
+ DCFCLKRequiredForPeakBandwidth = DCFCLKPerState[i];
+ } else {
+ DCFCLKRequiredForPeakBandwidth = dml_max3(DCFCLKRequiredForPeakBandwidth, 2 * ExtraLatencyCycles
+ / (MaximumTvmPlus2Tr0PlusTsw - MinimumTvmPlus2Tr0 - PrefetchPixelLinesTime[k] / 4),
+ (2 * ExtraLatencyCycles + PixelDCFCLKCyclesRequiredInPrefetch[k]) / (MaximumTvmPlus2Tr0PlusTsw - MinimumTvmPlus2Tr0));
+ }
+ }
+ DCFCLKState[i][j] = dml_min(DCFCLKPerState[i], 1.05 * (1 + mode_lib->vba.PercentMarginOverMinimumRequiredDCFCLK / 100)
+ * dml_max(DCFCLKRequiredForAverageBandwidth, DCFCLKRequiredForPeakBandwidth));
+ }
+ }
+}
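+
+/*
+ * Hypothetical example of the final clamp: if the average- and
+ * peak-bandwidth requirements above come out to 400 MHz and 520 MHz with a
+ * zero PercentMarginOverMinimumRequiredDCFCLK, DCFCLKState becomes
+ * min(DCFCLKPerState[i], 1.05 * 520 MHz), i.e. 546 MHz unless the state's
+ * hard cap is lower.
+ */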
+
+#endif /* CONFIG_DRM_AMD_DC_DCN3_0 */
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.h b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.h
new file mode 100644
index 000000000000..4e249eaabfdb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DML30_DISPLAY_MODE_VBA_H__
+#define __DML30_DISPLAY_MODE_VBA_H__
+
+void dml30_recalculate(struct display_mode_lib *mode_lib);
+void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib);
+double dml30_CalculateWriteBackDISPCLK(
+ enum source_format_class WritebackPixelFormat,
+ double PixelClock,
+ double WritebackHRatio,
+ double WritebackVRatio,
+ unsigned int WritebackHTaps,
+ unsigned int WritebackVTaps,
+ long WritebackSourceWidth,
+ long WritebackDestinationWidth,
+ unsigned int HTotal,
+ unsigned int WritebackLineBufferSize);
+
+#endif /* __DML30_DISPLAY_MODE_VBA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
new file mode 100644
index 000000000000..5bb10f6e300d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -0,0 +1,1868 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+
+#include "../display_mode_lib.h"
+#include "../display_mode_vba.h"
+#include "../dml_inline_defs.h"
+#include "display_rq_dlg_calc_30.h"
+
+static bool is_dual_plane(enum source_format_class source_format)
+{
+ bool ret_val = 0;
+
+ if ((source_format == dm_420_12) || (source_format == dm_420_8) || (source_format == dm_420_10) || (source_format == dm_rgbe_alpha))
+ ret_val = 1;
+
+ return ret_val;
+}
+
+static double get_refcyc_per_delivery(struct display_mode_lib *mode_lib,
+ double refclk_freq_in_mhz,
+ double pclk_freq_in_mhz,
+ unsigned int odm_combine,
+ unsigned int recout_width,
+ unsigned int hactive,
+ double vratio,
+ double hscale_pixel_rate,
+ unsigned int delivery_width,
+ unsigned int req_per_swath_ub)
+{
+ double refcyc_per_delivery = 0.0;
+
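+	// refclk cycles available per request: for vratio <= 1 delivery is paced by
+	// the display side (recout/hactive width at the pixel clock, scaled for ODM
+	// combine); for vratio > 1 it is paced by the DPP horizontal fetch rate
+	// (delivery_width at hscale_pixel_rate). The result is divided by
+	// req_per_swath_ub to spread it across the requests in one swath.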
+ if (vratio <= 1.0) {
+ if (odm_combine)
+ refcyc_per_delivery = (double)refclk_freq_in_mhz * (double)((unsigned int)odm_combine*2)
+ * dml_min((double)recout_width, (double)hactive / ((unsigned int)odm_combine*2))
+ / pclk_freq_in_mhz / (double)req_per_swath_ub;
+ else
+ refcyc_per_delivery = (double)refclk_freq_in_mhz * (double)recout_width
+ / pclk_freq_in_mhz / (double)req_per_swath_ub;
+ } else {
+ refcyc_per_delivery = (double)refclk_freq_in_mhz * (double)delivery_width
+ / (double)hscale_pixel_rate / (double)req_per_swath_ub;
+ }
+
+ dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: recout_width = %d\n", __func__, recout_width);
+ dml_print("DML_DLG: %s: vratio = %3.2f\n", __func__, vratio);
+ dml_print("DML_DLG: %s: req_per_swath_ub = %d\n", __func__, req_per_swath_ub);
+ dml_print("DML_DLG: %s: refcyc_per_delivery= %3.2f\n", __func__, refcyc_per_delivery);
+
+ return refcyc_per_delivery;
+
+}
+
+static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_size)
+{
+ if (tile_size == dm_256k_tile)
+ return (256 * 1024);
+ else if (tile_size == dm_64k_tile)
+ return (64 * 1024);
+ else
+ return (4 * 1024);
+}
+
+static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib,
+ display_data_rq_regs_st *rq_regs,
+ const display_data_rq_sizing_params_st rq_sizing)
+{
+ dml_print("DML_DLG: %s: rq_sizing param\n", __func__);
+ print__data_rq_sizing_params_st(mode_lib, rq_sizing);
+
+ rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10;
+
+ if (rq_sizing.min_chunk_bytes == 0)
+ rq_regs->min_chunk_size = 0;
+ else
+ rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1;
+
+ rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10;
+ if (rq_sizing.min_meta_chunk_bytes == 0)
+ rq_regs->min_meta_chunk_size = 0;
+ else
+ rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1;
+
+ rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6;
+ rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6;
+}
+
+static void extract_rq_regs(struct display_mode_lib *mode_lib,
+ display_rq_regs_st *rq_regs,
+ const display_rq_params_st rq_param)
+{
+ unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
+ unsigned int detile_buf_plane1_addr = 0;
+
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
+
+ rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_l.dpte_row_height),
+ 1) - 3;
+
+ if (rq_param.yuv420) {
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
+ rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_c.dpte_row_height),
+ 1) - 3;
+ }
+
+ rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
+ rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
+
+ // FIXME: take the max between luma, chroma chunk size?
+ // okay for now, as we are setting chunk_bytes to 8kb anyways
+ if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024 || (rq_param.yuv420 && rq_param.sizing.rq_c.chunk_bytes >= 32 * 1024)) { //32kb
+ rq_regs->drq_expansion_mode = 0;
+ } else {
+ rq_regs->drq_expansion_mode = 2;
+ }
+ rq_regs->prq_expansion_mode = 1;
+ rq_regs->mrq_expansion_mode = 1;
+ rq_regs->crq_expansion_mode = 1;
+
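+	// For dual-plane formats, place the chroma (plane1) detile buffer base at
+	// half of the buffer, or at 2/3 (rounded to a 256B multiple) when luma
+	// stores noticeably more swath bytes than chroma; the register value is in
+	// units of 64 bytes.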
+ if (rq_param.yuv420) {
+ if ((double)rq_param.misc.rq_l.stored_swath_bytes
+ / (double)rq_param.misc.rq_c.stored_swath_bytes <= 1.5) {
+ detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma
+ } else {
+ detile_buf_plane1_addr = dml_round_to_multiple((unsigned int)((2.0 * detile_buf_size_in_bytes) / 3.0),
+ 256,
+ 0) / 64.0; // 2/3 to chroma
+ }
+ }
+ rq_regs->plane1_base_address = detile_buf_plane1_addr;
+}
+
+static void handle_det_buf_split(struct display_mode_lib *mode_lib,
+ display_rq_params_st *rq_param,
+ const display_pipe_source_params_st pipe_src_param)
+{
+ unsigned int total_swath_bytes = 0;
+ unsigned int swath_bytes_l = 0;
+ unsigned int swath_bytes_c = 0;
+ unsigned int full_swath_bytes_packed_l = 0;
+ unsigned int full_swath_bytes_packed_c = 0;
+ bool req128_l = 0;
+ bool req128_c = 0;
+ bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
+ bool surf_vert = (pipe_src_param.source_scan == dm_vert);
+ unsigned int log2_swath_height_l = 0;
+ unsigned int log2_swath_height_c = 0;
+ unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
+
+ full_swath_bytes_packed_l = rq_param->misc.rq_l.full_swath_bytes;
+ full_swath_bytes_packed_c = rq_param->misc.rq_c.full_swath_bytes;
+
+ if (rq_param->yuv420_10bpc) {
+ full_swath_bytes_packed_l = dml_round_to_multiple(rq_param->misc.rq_l.full_swath_bytes * 2.0 / 3.0,
+ 256,
+ 1) + 256;
+ full_swath_bytes_packed_c = dml_round_to_multiple(rq_param->misc.rq_c.full_swath_bytes * 2.0 / 3.0,
+ 256,
+ 1) + 256;
+ }
+
+ if (rq_param->yuv420)
+ total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
+ else
+ total_swath_bytes = 2 * full_swath_bytes_packed_l;
+
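+	// Decide per-plane request size: if twice the full swath bytes of every plane
+	// fits in the DET, keep full 256B requests; otherwise switch the dominant
+	// plane(s) to 128B requests, which halves their stored swath, until the total
+	// fits.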
+ if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
+ req128_l = 0;
+ req128_c = 0;
+ swath_bytes_l = full_swath_bytes_packed_l;
+ swath_bytes_c = full_swath_bytes_packed_c;
+ } else if (!rq_param->yuv420) {
+ req128_l = 1;
+ req128_c = 0;
+ swath_bytes_c = full_swath_bytes_packed_c;
+ swath_bytes_l = full_swath_bytes_packed_l / 2;
+ } else if ((double)full_swath_bytes_packed_l / (double)full_swath_bytes_packed_c < 1.5) {
+ req128_l = 0;
+ req128_c = 1;
+ swath_bytes_l = full_swath_bytes_packed_l;
+ swath_bytes_c = full_swath_bytes_packed_c / 2;
+
+ total_swath_bytes = 2 * swath_bytes_l + 2 * swath_bytes_c;
+
+ if (total_swath_bytes > detile_buf_size_in_bytes) {
+ req128_l = 1;
+ swath_bytes_l = full_swath_bytes_packed_l / 2;
+ }
+ } else {
+ req128_l = 1;
+ req128_c = 0;
+ swath_bytes_l = full_swath_bytes_packed_l/2;
+ swath_bytes_c = full_swath_bytes_packed_c;
+
+ total_swath_bytes = 2 * swath_bytes_l + 2 * swath_bytes_c;
+
+ if (total_swath_bytes > detile_buf_size_in_bytes) {
+ req128_c = 1;
+ swath_bytes_c = full_swath_bytes_packed_c/2;
+ }
+ }
+
+ if (rq_param->yuv420)
+ total_swath_bytes = 2 * swath_bytes_l + 2 * swath_bytes_c;
+ else
+ total_swath_bytes = 2 * swath_bytes_l;
+
+ rq_param->misc.rq_l.stored_swath_bytes = swath_bytes_l;
+ rq_param->misc.rq_c.stored_swath_bytes = swath_bytes_c;
+
+ if (surf_linear) {
+ log2_swath_height_l = 0;
+ log2_swath_height_c = 0;
+ } else if (!surf_vert) {
+ log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
+ log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
+ } else {
+ log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
+ log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
+ }
+ rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
+ rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
+
+ dml_print("DML_DLG: %s: req128_l = %0d\n", __func__, req128_l);
+ dml_print("DML_DLG: %s: req128_c = %0d\n", __func__, req128_c);
+ dml_print("DML_DLG: %s: full_swath_bytes_packed_l = %0d\n",
+ __func__,
+ full_swath_bytes_packed_l);
+ dml_print("DML_DLG: %s: full_swath_bytes_packed_c = %0d\n",
+ __func__,
+ full_swath_bytes_packed_c);
+}
+
+static bool CalculateBytePerPixelAnd256BBlockSizes(
+ enum source_format_class SourcePixelFormat,
+ enum dm_swizzle_mode SurfaceTiling,
+ unsigned int *BytePerPixelY,
+ unsigned int *BytePerPixelC,
+ double *BytePerPixelDETY,
+ double *BytePerPixelDETC,
+ unsigned int *BlockHeight256BytesY,
+ unsigned int *BlockHeight256BytesC,
+ unsigned int *BlockWidth256BytesY,
+ unsigned int *BlockWidth256BytesC)
+{
+ if (SourcePixelFormat == dm_444_64) {
+ *BytePerPixelDETY = 8;
+ *BytePerPixelDETC = 0;
+ *BytePerPixelY = 8;
+ *BytePerPixelC = 0;
+ } else if (SourcePixelFormat == dm_444_32 || SourcePixelFormat == dm_rgbe) {
+ *BytePerPixelDETY = 4;
+ *BytePerPixelDETC = 0;
+ *BytePerPixelY = 4;
+ *BytePerPixelC = 0;
+	} else if (SourcePixelFormat == dm_444_16) {
+ *BytePerPixelDETY = 2;
+ *BytePerPixelDETC = 0;
+ *BytePerPixelY = 2;
+ *BytePerPixelC = 0;
+ } else if (SourcePixelFormat == dm_444_8) {
+ *BytePerPixelDETY = 1;
+ *BytePerPixelDETC = 0;
+ *BytePerPixelY = 1;
+ *BytePerPixelC = 0;
+ } else if (SourcePixelFormat == dm_rgbe_alpha) {
+ *BytePerPixelDETY = 4;
+ *BytePerPixelDETC = 1;
+ *BytePerPixelY = 4;
+ *BytePerPixelC = 1;
+ } else if (SourcePixelFormat == dm_420_8) {
+ *BytePerPixelDETY = 1;
+ *BytePerPixelDETC = 2;
+ *BytePerPixelY = 1;
+ *BytePerPixelC = 2;
+ } else if (SourcePixelFormat == dm_420_12) {
+ *BytePerPixelDETY = 2;
+ *BytePerPixelDETC = 4;
+ *BytePerPixelY = 2;
+ *BytePerPixelC = 4;
+ } else {
+ *BytePerPixelDETY = 4.0 / 3;
+ *BytePerPixelDETC = 8.0 / 3;
+ *BytePerPixelY = 2;
+ *BytePerPixelC = 4;
+ }
+
+ if ((SourcePixelFormat == dm_444_64 || SourcePixelFormat == dm_444_32
+ || SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_8
+ || SourcePixelFormat == dm_mono_16 || SourcePixelFormat == dm_mono_8
+ || SourcePixelFormat == dm_rgbe)) {
+ if (SurfaceTiling == dm_sw_linear) {
+ *BlockHeight256BytesY = 1;
+ } else if (SourcePixelFormat == dm_444_64) {
+ *BlockHeight256BytesY = 4;
+ } else if (SourcePixelFormat == dm_444_8) {
+ *BlockHeight256BytesY = 16;
+ } else {
+ *BlockHeight256BytesY = 8;
+ }
+ *BlockWidth256BytesY = 256U / *BytePerPixelY / *BlockHeight256BytesY;
+ *BlockHeight256BytesC = 0;
+ *BlockWidth256BytesC = 0;
+ } else {
+ if (SurfaceTiling == dm_sw_linear) {
+ *BlockHeight256BytesY = 1;
+ *BlockHeight256BytesC = 1;
+ } else if (SourcePixelFormat == dm_rgbe_alpha) {
+ *BlockHeight256BytesY = 8;
+ *BlockHeight256BytesC = 16;
+ } else if (SourcePixelFormat == dm_420_8) {
+ *BlockHeight256BytesY = 16;
+ *BlockHeight256BytesC = 8;
+ } else {
+ *BlockHeight256BytesY = 8;
+ *BlockHeight256BytesC = 8;
+ }
+ *BlockWidth256BytesY = 256U / *BytePerPixelY / *BlockHeight256BytesY;
+ *BlockWidth256BytesC = 256U / *BytePerPixelC / *BlockHeight256BytesC;
+ }
+ return true;
+}
+
+static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
+ display_data_rq_dlg_params_st *rq_dlg_param,
+ display_data_rq_misc_params_st *rq_misc_param,
+ display_data_rq_sizing_params_st *rq_sizing_param,
+ unsigned int vp_width,
+ unsigned int vp_height,
+ unsigned int data_pitch,
+ unsigned int meta_pitch,
+ unsigned int source_format,
+ unsigned int tiling,
+ unsigned int macro_tile_size,
+ unsigned int source_scan,
+ unsigned int hostvm_enable,
+ unsigned int is_chroma,
+ unsigned int surface_height)
+{
+ bool surf_linear = (tiling == dm_sw_linear);
+ bool surf_vert = (source_scan == dm_vert);
+
+ unsigned int bytes_per_element = 0;
+ unsigned int bytes_per_element_y = 0;
+ unsigned int bytes_per_element_c = 0;
+
+ unsigned int blk256_width = 0;
+ unsigned int blk256_height = 0;
+
+ unsigned int blk256_width_y = 0;
+ unsigned int blk256_height_y = 0;
+ unsigned int blk256_width_c = 0;
+ unsigned int blk256_height_c = 0;
+ unsigned int log2_bytes_per_element = 0;
+ unsigned int log2_blk256_width = 0;
+ unsigned int log2_blk256_height = 0;
+ unsigned int blk_bytes = 0;
+ unsigned int log2_blk_bytes = 0;
+ unsigned int log2_blk_height = 0;
+ unsigned int log2_blk_width = 0;
+ unsigned int log2_meta_req_bytes = 0;
+ unsigned int log2_meta_req_height = 0;
+ unsigned int log2_meta_req_width = 0;
+ unsigned int meta_req_width = 0;
+ unsigned int meta_req_height = 0;
+ unsigned int log2_meta_row_height = 0;
+ unsigned int meta_row_width_ub = 0;
+ unsigned int log2_meta_chunk_bytes = 0;
+ unsigned int log2_meta_chunk_height = 0;
+
+ //full sized meta chunk width in unit of data elements
+ unsigned int log2_meta_chunk_width = 0;
+ unsigned int log2_min_meta_chunk_bytes = 0;
+ unsigned int min_meta_chunk_width = 0;
+ unsigned int meta_chunk_width = 0;
+ unsigned int meta_chunk_per_row_int = 0;
+ unsigned int meta_row_remainder = 0;
+ unsigned int meta_chunk_threshold = 0;
+ unsigned int meta_blk_bytes = 0;
+ unsigned int meta_blk_height = 0;
+ unsigned int meta_blk_width = 0;
+ unsigned int meta_surface_bytes = 0;
+ unsigned int vmpg_bytes = 0;
+ unsigned int meta_pte_req_per_frame_ub = 0;
+ unsigned int meta_pte_bytes_per_frame_ub = 0;
+ const unsigned int log2_vmpg_bytes = dml_log2(mode_lib->soc.gpuvm_min_page_size_bytes);
+ const bool dual_plane_en = is_dual_plane((enum source_format_class)(source_format));
+ const unsigned int dpte_buf_in_pte_reqs = dual_plane_en ?
+ (is_chroma ? mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma : mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma)
+ : (mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma + mode_lib->ip.dpte_buffer_size_in_pte_reqs_chroma);
+
+ unsigned int log2_vmpg_height = 0;
+ unsigned int log2_vmpg_width = 0;
+ unsigned int log2_dpte_req_height_ptes = 0;
+ unsigned int log2_dpte_req_height = 0;
+ unsigned int log2_dpte_req_width = 0;
+ unsigned int log2_dpte_row_height_linear = 0;
+ unsigned int log2_dpte_row_height = 0;
+ unsigned int log2_dpte_group_width = 0;
+ unsigned int dpte_row_width_ub = 0;
+ unsigned int dpte_req_height = 0;
+ unsigned int dpte_req_width = 0;
+ unsigned int dpte_group_width = 0;
+ unsigned int log2_dpte_group_bytes = 0;
+ unsigned int log2_dpte_group_length = 0;
+ double byte_per_pixel_det_y = 0;
+ double byte_per_pixel_det_c = 0;
+
+ CalculateBytePerPixelAnd256BBlockSizes((enum source_format_class)(source_format),
+ (enum dm_swizzle_mode)(tiling),
+ &bytes_per_element_y,
+ &bytes_per_element_c,
+ &byte_per_pixel_det_y,
+ &byte_per_pixel_det_c,
+ &blk256_height_y,
+ &blk256_height_c,
+ &blk256_width_y,
+ &blk256_width_c);
+
+ if (!is_chroma) {
+ blk256_width = blk256_width_y;
+ blk256_height = blk256_height_y;
+ bytes_per_element = bytes_per_element_y;
+ } else {
+ blk256_width = blk256_width_c;
+ blk256_height = blk256_height_c;
+ bytes_per_element = bytes_per_element_c;
+ }
+
+ log2_bytes_per_element = dml_log2(bytes_per_element);
+
+ dml_print("DML_DLG: %s: surf_linear = %d\n", __func__, surf_linear);
+ dml_print("DML_DLG: %s: surf_vert = %d\n", __func__, surf_vert);
+ dml_print("DML_DLG: %s: blk256_width = %d\n", __func__, blk256_width);
+ dml_print("DML_DLG: %s: blk256_height = %d\n", __func__, blk256_height);
+
+ log2_blk256_width = dml_log2((double)blk256_width);
+ log2_blk256_height = dml_log2((double)blk256_height);
+ blk_bytes = surf_linear ?
+ 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
+ log2_blk_bytes = dml_log2((double)blk_bytes);
+ log2_blk_height = 0;
+ log2_blk_width = 0;
+
+ // remember log rule
+ // "+" in log is multiply
+ // "-" in log is divide
+ // "/2" is like square root
+ // blk is vertical biased
+ if (tiling != dm_sw_linear)
+ log2_blk_height = log2_blk256_height
+ + dml_ceil((double)(log2_blk_bytes - 8) / 2.0, 1);
+ else
+ log2_blk_height = 0; // blk height of 1
+
+ log2_blk_width = log2_blk_bytes - log2_bytes_per_element - log2_blk_height;
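+	// e.g. 32bpp (log2_bytes_per_element = 2) with a 64KB macro tile: blk256 is
+	// 8x8, so log2_blk_height = 3 + ceil((16 - 8) / 2) = 7 and log2_blk_width =
+	// 16 - 2 - 7 = 7, i.e. a 128x128-element block (128 * 128 * 4B = 64KB).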
+
+ if (!surf_vert) {
+ int unsigned temp = 0;
+
+ temp = dml_round_to_multiple(vp_width - 1, blk256_width, 1) + blk256_width;
+ if (data_pitch < blk256_width) {
+ dml_print("WARNING: DML_DLG: %s: swath_size calculation ignoring data_pitch=%u < blk256_width=%u\n", __func__, data_pitch, blk256_width);
+ } else {
+ if (temp > data_pitch) {
+ if (data_pitch >= vp_width)
+ temp = data_pitch;
+ else
+ dml_print("WARNING: DML_DLG: %s: swath_size calculation ignoring data_pitch=%u < vp_width=%u\n", __func__, data_pitch, vp_width);
+ }
+ }
+ rq_dlg_param->swath_width_ub = temp;
+ rq_dlg_param->req_per_swath_ub = temp >> log2_blk256_width;
+ } else {
+ int unsigned temp = 0;
+
+ temp = dml_round_to_multiple(vp_height - 1, blk256_height, 1) + blk256_height;
+ if (surface_height < blk256_height) {
+			dml_print("WARNING: DML_DLG: %s: swath_size calculation ignoring surface_height=%u < blk256_height=%u\n", __func__, surface_height, blk256_height);
+ } else {
+ if (temp > surface_height) {
+ if (surface_height >= vp_height)
+ temp = surface_height;
+ else
+					dml_print("WARNING: DML_DLG: %s: swath_size calculation ignoring surface_height=%u < vp_height=%u\n", __func__, surface_height, vp_height);
+ }
+ }
+ rq_dlg_param->swath_width_ub = temp;
+ rq_dlg_param->req_per_swath_ub = temp >> log2_blk256_height;
+ }
+
+ if (!surf_vert)
+ rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_height
+ * bytes_per_element;
+ else
+ rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_width
+ * bytes_per_element;
+
+ rq_misc_param->blk256_height = blk256_height;
+ rq_misc_param->blk256_width = blk256_width;
+
+ // -------
+ // meta
+ // -------
+ log2_meta_req_bytes = 6; // meta request is 64b and is 8x8byte meta element
+
+ // each 64b meta request for dcn is 8x8 meta elements and
+	// a meta element covers one 256b block of the data surface.
+ log2_meta_req_height = log2_blk256_height + 3; // meta req is 8x8 byte, each byte represent 1 blk256
+ log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
+ - log2_meta_req_height;
+ meta_req_width = 1 << log2_meta_req_width;
+ meta_req_height = 1 << log2_meta_req_height;
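+	// e.g. 32bpp: blk256 is 8x8 elements, so a 64B meta request spans
+	// 2^(3+3) = 64 rows by 2^(6+8-2-6) = 64 columns, i.e. one request covers
+	// a 64x64-element (64 blk256 blocks) region of the surface.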
+ log2_meta_row_height = 0;
+ meta_row_width_ub = 0;
+
+ // the dimensions of a meta row are meta_row_width x meta_row_height in elements.
+ // calculate upper bound of the meta_row_width
+ if (!surf_vert) {
+ log2_meta_row_height = log2_meta_req_height;
+ meta_row_width_ub = dml_round_to_multiple(vp_width - 1, meta_req_width, 1)
+ + meta_req_width;
+ rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_width;
+ } else {
+ log2_meta_row_height = log2_meta_req_width;
+ meta_row_width_ub = dml_round_to_multiple(vp_height - 1, meta_req_height, 1)
+ + meta_req_height;
+ rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_height;
+ }
+ rq_dlg_param->meta_bytes_per_row_ub = rq_dlg_param->meta_req_per_row_ub * 64;
+
+ rq_dlg_param->meta_row_height = 1 << log2_meta_row_height;
+
+ log2_meta_chunk_bytes = dml_log2(rq_sizing_param->meta_chunk_bytes);
+ log2_meta_chunk_height = log2_meta_row_height;
+
+ //full sized meta chunk width in unit of data elements
+ log2_meta_chunk_width = log2_meta_chunk_bytes + 8 - log2_bytes_per_element
+ - log2_meta_chunk_height;
+ log2_min_meta_chunk_bytes = dml_log2(rq_sizing_param->min_meta_chunk_bytes);
+ min_meta_chunk_width = 1
+ << (log2_min_meta_chunk_bytes + 8 - log2_bytes_per_element
+ - log2_meta_chunk_height);
+ meta_chunk_width = 1 << log2_meta_chunk_width;
+ meta_chunk_per_row_int = (unsigned int)(meta_row_width_ub / meta_chunk_width);
+ meta_row_remainder = meta_row_width_ub % meta_chunk_width;
+ meta_chunk_threshold = 0;
+ meta_blk_bytes = 4096;
+ meta_blk_height = blk256_height * 64;
+ meta_blk_width = meta_blk_bytes * 256 / bytes_per_element / meta_blk_height;
+ meta_surface_bytes = meta_pitch
+ * (dml_round_to_multiple(vp_height - 1, meta_blk_height, 1) + meta_blk_height)
+ * bytes_per_element / 256;
+ vmpg_bytes = mode_lib->soc.gpuvm_min_page_size_bytes;
+ meta_pte_req_per_frame_ub = (dml_round_to_multiple(meta_surface_bytes - vmpg_bytes,
+ 8 * vmpg_bytes,
+ 1) + 8 * vmpg_bytes) / (8 * vmpg_bytes);
+ meta_pte_bytes_per_frame_ub = meta_pte_req_per_frame_ub * 64; //64B mpte request
+ rq_dlg_param->meta_pte_bytes_per_frame_ub = meta_pte_bytes_per_frame_ub;
+
+ dml_print("DML_DLG: %s: meta_blk_height = %d\n", __func__, meta_blk_height);
+ dml_print("DML_DLG: %s: meta_blk_width = %d\n", __func__, meta_blk_width);
+ dml_print("DML_DLG: %s: meta_surface_bytes = %d\n", __func__, meta_surface_bytes);
+ dml_print("DML_DLG: %s: meta_pte_req_per_frame_ub = %d\n",
+ __func__,
+ meta_pte_req_per_frame_ub);
+ dml_print("DML_DLG: %s: meta_pte_bytes_per_frame_ub = %d\n",
+ __func__,
+ meta_pte_bytes_per_frame_ub);
+
+ if (!surf_vert)
+ meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width;
+ else
+ meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_height;
+
+ if (meta_row_remainder <= meta_chunk_threshold)
+ rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
+ else
+ rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
+
+ // ------
+ // dpte
+ // ------
+ if (surf_linear) {
+ log2_vmpg_height = 0; // one line high
+ } else {
+ log2_vmpg_height = (log2_vmpg_bytes - 8) / 2 + log2_blk256_height;
+ }
+ log2_vmpg_width = log2_vmpg_bytes - log2_bytes_per_element - log2_vmpg_height;
+
+ // only 3 possible shapes for dpte request in dimensions of ptes: 8x1, 4x2, 2x4.
+ if (surf_linear) { //one 64B PTE request returns 8 PTEs
+ log2_dpte_req_height_ptes = 0;
+ log2_dpte_req_width = log2_vmpg_width + 3;
+ log2_dpte_req_height = 0;
+ } else if (log2_blk_bytes == 12) { //4KB tile means 4kB page size
+ //one 64B req gives 8x1 PTEs for 4KB tile
+ log2_dpte_req_height_ptes = 0;
+ log2_dpte_req_width = log2_blk_width + 3;
+ log2_dpte_req_height = log2_blk_height + 0;
+ } else if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) { // tile block >= 64KB
+ //two 64B reqs of 2x4 PTEs give 16 PTEs to cover 64KB
+ log2_dpte_req_height_ptes = 4;
+ log2_dpte_req_width = log2_blk256_width + 4; // log2_64KB_width
+ log2_dpte_req_height = log2_blk256_height + 4; // log2_64KB_height
+ } else { //64KB page size and must 64KB tile block
+ //one 64B req gives 8x1 PTEs for 64KB tile
+ log2_dpte_req_height_ptes = 0;
+ log2_dpte_req_width = log2_blk_width + 3;
+ log2_dpte_req_height = log2_blk_height + 0;
+ }
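+	// e.g. 32bpp, 64KB tile, 4KB GPUVM pages: the dpte request region is
+	// 2^(3+4) x 2^(3+4) = 128x128 elements = 64KB of surface, i.e. 16 4KB pages
+	// returned by two 64B PTE requests of 8 PTEs each.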
+
+ // The dpte request dimensions in data elements is dpte_req_width x dpte_req_height
+ // log2_vmpg_width is how much 1 pte represent, now calculating how much a 64b pte req represent
+ // That depends on the pte shape (i.e. 8x1, 4x2, 2x4)
+ //log2_dpte_req_height = log2_vmpg_height + log2_dpte_req_height_ptes;
+ //log2_dpte_req_width = log2_vmpg_width + log2_dpte_req_width_ptes;
+ dpte_req_height = 1 << log2_dpte_req_height;
+ dpte_req_width = 1 << log2_dpte_req_width;
+
+ // calculate pitch dpte row buffer can hold
+ // round the result down to a power of two.
+ if (surf_linear) {
+ unsigned int dpte_row_height = 0;
+
+ log2_dpte_row_height_linear = dml_floor(dml_log2(dpte_buf_in_pte_reqs * dpte_req_width / data_pitch), 1);
+
+ dml_print("DML_DLG: %s: is_chroma = %d\n", __func__, is_chroma);
+ dml_print("DML_DLG: %s: dpte_buf_in_pte_reqs = %d\n", __func__, dpte_buf_in_pte_reqs);
+ dml_print("DML_DLG: %s: log2_dpte_row_height_linear = %d\n", __func__, log2_dpte_row_height_linear);
+
+ ASSERT(log2_dpte_row_height_linear >= 3);
+
+ if (log2_dpte_row_height_linear > 7)
+ log2_dpte_row_height_linear = 7;
+
+ log2_dpte_row_height = log2_dpte_row_height_linear;
+ // For linear, the dpte row is pitch dependent and the pte requests wrap at the pitch boundary.
+ // the dpte_row_width_ub is the upper bound of data_pitch*dpte_row_height in elements with this unique buffering.
+ dpte_row_height = 1 << log2_dpte_row_height;
+ dpte_row_width_ub = dml_round_to_multiple(data_pitch * dpte_row_height - 1,
+ dpte_req_width,
+ 1) + dpte_req_width;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
+ } else {
+ // the upper bound of the dpte_row_width without dependency on viewport position follows.
+ // for tiled mode, row height is the same as req height and row store up to vp size upper bound
+ if (!surf_vert) {
+ log2_dpte_row_height = log2_dpte_req_height;
+ dpte_row_width_ub = dml_round_to_multiple(vp_width - 1, dpte_req_width, 1)
+ + dpte_req_width;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
+ } else {
+ log2_dpte_row_height =
+ (log2_blk_width < log2_dpte_req_width) ?
+ log2_blk_width : log2_dpte_req_width;
+ dpte_row_width_ub = dml_round_to_multiple(vp_height - 1, dpte_req_height, 1)
+ + dpte_req_height;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_height;
+ }
+ }
+ if (log2_blk_bytes >= 16 && log2_vmpg_bytes == 12) // tile block >= 64KB
+ rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 128; //2*64B dpte request
+ else
+ rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 64; //64B dpte request
+
+ rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height;
+
+ // the dpte_group_bytes is reduced for the specific case of vertical
+ // access of a tile surface that has dpte request of 8x1 ptes.
+ if (hostvm_enable)
+ rq_sizing_param->dpte_group_bytes = 512;
+ else {
+		if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
+ rq_sizing_param->dpte_group_bytes = 512;
+ else
+ rq_sizing_param->dpte_group_bytes = 2048;
+ }
+
+ //since pte request size is 64byte, the number of data pte requests per full sized group is as follows.
+ log2_dpte_group_bytes = dml_log2(rq_sizing_param->dpte_group_bytes);
+ log2_dpte_group_length = log2_dpte_group_bytes - 6; //length in 64b requests
+
+ // full sized data pte group width in elements
+ if (!surf_vert)
+ log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_width;
+ else
+ log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_height;
+
+ //But if the tile block >=64KB and the page size is 4KB, then each dPTE request is 2*64B
+ if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) // tile block >= 64KB
+ log2_dpte_group_width = log2_dpte_group_width - 1;
+
+ dpte_group_width = 1 << log2_dpte_group_width;
+
+ // since dpte groups are only aligned to dpte_req_width and not dpte_group_width,
+ // the upper bound for the dpte groups per row is as follows.
+ rq_dlg_param->dpte_groups_per_row_ub = dml_ceil((double)dpte_row_width_ub / dpte_group_width,
+ 1);
+}
+
+static void get_surf_rq_param(struct display_mode_lib *mode_lib,
+ display_data_rq_sizing_params_st *rq_sizing_param,
+ display_data_rq_dlg_params_st *rq_dlg_param,
+ display_data_rq_misc_params_st *rq_misc_param,
+ const display_pipe_params_st pipe_param,
+ bool is_chroma,
+ bool is_alpha)
+{
+ bool mode_422 = 0;
+ unsigned int vp_width = 0;
+ unsigned int vp_height = 0;
+ unsigned int data_pitch = 0;
+ unsigned int meta_pitch = 0;
+ unsigned int surface_height = 0;
+ unsigned int ppe = mode_422 ? 2 : 1;
+
+	// FIXME check if ppe applies for both luma and chroma in 422 case
+	if (is_chroma || is_alpha) {
+ vp_width = pipe_param.src.viewport_width_c / ppe;
+ vp_height = pipe_param.src.viewport_height_c;
+ data_pitch = pipe_param.src.data_pitch_c;
+ meta_pitch = pipe_param.src.meta_pitch_c;
+ surface_height = pipe_param.src.surface_height_y / 2.0;
+ } else {
+ vp_width = pipe_param.src.viewport_width / ppe;
+ vp_height = pipe_param.src.viewport_height;
+ data_pitch = pipe_param.src.data_pitch;
+ meta_pitch = pipe_param.src.meta_pitch;
+ surface_height = pipe_param.src.surface_height_y;
+ }
+
+ if (pipe_param.dest.odm_combine) {
+ unsigned int access_dir = 0;
+ unsigned int full_src_vp_width = 0;
+ unsigned int hactive_odm = 0;
+ unsigned int src_hactive_odm = 0;
+ access_dir = (pipe_param.src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
+ hactive_odm = pipe_param.dest.hactive / ((unsigned int)pipe_param.dest.odm_combine*2);
+ if (is_chroma) {
+ full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio_c * pipe_param.dest.full_recout_width;
+ src_hactive_odm = pipe_param.scale_ratio_depth.hscl_ratio_c * hactive_odm;
+ } else {
+ full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio * pipe_param.dest.full_recout_width;
+ src_hactive_odm = pipe_param.scale_ratio_depth.hscl_ratio * hactive_odm;
+ }
+
+ if (access_dir == 0) {
+ vp_width = dml_min(full_src_vp_width, src_hactive_odm);
+ dml_print("DML_DLG: %s: vp_width = %d\n", __func__, vp_width);
+ } else {
+ vp_height = dml_min(full_src_vp_width, src_hactive_odm);
+ dml_print("DML_DLG: %s: vp_height = %d\n", __func__, vp_height);
+ }
+ dml_print("DML_DLG: %s: full_src_vp_width = %d\n", __func__, full_src_vp_width);
+ dml_print("DML_DLG: %s: hactive_odm = %d\n", __func__, hactive_odm);
+ dml_print("DML_DLG: %s: src_hactive_odm = %d\n", __func__, src_hactive_odm);
+ }
+
+ rq_sizing_param->chunk_bytes = 8192;
+
+ if (is_alpha) {
+ rq_sizing_param->chunk_bytes = 4096;
+ }
+
+ if (rq_sizing_param->chunk_bytes == 64 * 1024)
+ rq_sizing_param->min_chunk_bytes = 0;
+ else
+ rq_sizing_param->min_chunk_bytes = 1024;
+
+ rq_sizing_param->meta_chunk_bytes = 2048;
+ rq_sizing_param->min_meta_chunk_bytes = 256;
+
+ if (pipe_param.src.hostvm)
+ rq_sizing_param->mpte_group_bytes = 512;
+ else
+ rq_sizing_param->mpte_group_bytes = 2048;
+
+ get_meta_and_pte_attr(mode_lib,
+ rq_dlg_param,
+ rq_misc_param,
+ rq_sizing_param,
+ vp_width,
+ vp_height,
+ data_pitch,
+ meta_pitch,
+ pipe_param.src.source_format,
+ pipe_param.src.sw_mode,
+ pipe_param.src.macro_tile_size,
+ pipe_param.src.source_scan,
+ pipe_param.src.hostvm,
+ is_chroma,
+ surface_height);
+}
+
+static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
+ display_rq_params_st *rq_param,
+ const display_pipe_params_st pipe_param)
+{
+ // get param for luma surface
+ rq_param->yuv420 = pipe_param.src.source_format == dm_420_8
+ || pipe_param.src.source_format == dm_420_10
+ || pipe_param.src.source_format == dm_rgbe_alpha
+ || pipe_param.src.source_format == dm_420_12;
+
+ rq_param->yuv420_10bpc = pipe_param.src.source_format == dm_420_10;
+
+ rq_param->rgbe_alpha = (pipe_param.src.source_format == dm_rgbe_alpha)?1:0;
+
+ get_surf_rq_param(mode_lib,
+ &(rq_param->sizing.rq_l),
+ &(rq_param->dlg.rq_l),
+ &(rq_param->misc.rq_l),
+ pipe_param,
+ 0,
+ 0);
+
+ if (is_dual_plane((enum source_format_class)(pipe_param.src.source_format))) {
+ // get param for chroma surface
+ get_surf_rq_param(mode_lib,
+ &(rq_param->sizing.rq_c),
+ &(rq_param->dlg.rq_c),
+ &(rq_param->misc.rq_c),
+ pipe_param,
+ 1,
+ rq_param->rgbe_alpha);
+ }
+
+ // calculate how to split the det buffer space between luma and chroma
+ handle_det_buf_split(mode_lib, rq_param, pipe_param.src);
+ print__rq_params_st(mode_lib, *rq_param);
+}
+
+void dml30_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
+ display_rq_regs_st *rq_regs,
+ const display_pipe_params_st pipe_param)
+{
+ display_rq_params_st rq_param = { 0 };
+
+ memset(rq_regs, 0, sizeof(*rq_regs));
+ dml_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param);
+ extract_rq_regs(mode_lib, rq_regs, rq_param);
+
+ print__rq_regs_st(mode_lib, *rq_regs);
+}
+
+static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
+ double *refcyc_per_req_delivery_pre_cur,
+ double *refcyc_per_req_delivery_cur,
+ double refclk_freq_in_mhz,
+ double ref_freq_to_pix_freq,
+ double hscale_pixel_rate_l,
+ double hscl_ratio,
+ double vratio_pre_l,
+ double vratio_l,
+ unsigned int cur_width,
+ enum cursor_bpp cur_bpp)
+{
+ unsigned int cur_src_width = cur_width;
+ unsigned int cur_req_size = 0;
+ unsigned int cur_req_width = 0;
+ double cur_width_ub = 0.0;
+ double cur_req_per_width = 0.0;
+ double hactive_cur = 0.0;
+
+ ASSERT(cur_src_width <= 256);
+
+ *refcyc_per_req_delivery_pre_cur = 0.0;
+ *refcyc_per_req_delivery_cur = 0.0;
+ if (cur_src_width > 0) {
+ unsigned int cur_bit_per_pixel = 0;
+
+ if (cur_bpp == dm_cur_2bit) {
+ cur_req_size = 64; // byte
+ cur_bit_per_pixel = 2;
+ } else { // 32bit
+ cur_bit_per_pixel = 32;
+ if (cur_src_width >= 1 && cur_src_width <= 16)
+ cur_req_size = 64;
+ else if (cur_src_width >= 17 && cur_src_width <= 31)
+ cur_req_size = 128;
+ else
+ cur_req_size = 256;
+ }
+
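+		// e.g. a 64-pixel-wide 32bpp cursor: cur_req_size = 256B, cur_req_width =
+		// 256 / 4 = 64 pixels, so cur_width_ub = 64 and cur_req_per_width = 1,
+		// i.e. a single request delivers the whole cursor line.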
+ cur_req_width = (double)cur_req_size / ((double)cur_bit_per_pixel / 8.0);
+ cur_width_ub = dml_ceil((double)cur_src_width / (double)cur_req_width, 1)
+ * (double)cur_req_width;
+ cur_req_per_width = cur_width_ub / (double)cur_req_width;
+ hactive_cur = (double)cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor
+
+ if (vratio_pre_l <= 1.0) {
+ *refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq
+ / (double)cur_req_per_width;
+ } else {
+ *refcyc_per_req_delivery_pre_cur = (double)refclk_freq_in_mhz
+ * (double)cur_src_width / hscale_pixel_rate_l
+ / (double)cur_req_per_width;
+ }
+
+ ASSERT(*refcyc_per_req_delivery_pre_cur < dml_pow(2, 13));
+
+ if (vratio_l <= 1.0) {
+ *refcyc_per_req_delivery_cur = hactive_cur * ref_freq_to_pix_freq
+ / (double)cur_req_per_width;
+ } else {
+ *refcyc_per_req_delivery_cur = (double)refclk_freq_in_mhz
+ * (double)cur_src_width / hscale_pixel_rate_l
+ / (double)cur_req_per_width;
+ }
+
+ dml_print("DML_DLG: %s: cur_req_width = %d\n",
+ __func__,
+ cur_req_width);
+ dml_print("DML_DLG: %s: cur_width_ub = %3.2f\n",
+ __func__,
+ cur_width_ub);
+ dml_print("DML_DLG: %s: cur_req_per_width = %3.2f\n",
+ __func__,
+ cur_req_per_width);
+ dml_print("DML_DLG: %s: hactive_cur = %3.2f\n",
+ __func__,
+ hactive_cur);
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_cur = %3.2f\n",
+ __func__,
+ *refcyc_per_req_delivery_pre_cur);
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_cur = %3.2f\n",
+ __func__,
+ *refcyc_per_req_delivery_cur);
+
+ ASSERT(*refcyc_per_req_delivery_cur < dml_pow(2, 13));
+ }
+}
+
+// Note: currently taken in as-is.
+// It would be nice to decouple this code from the hw register implementation and
+// to extract the code that is repeated for luma and chroma.
+static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx,
+ display_dlg_regs_st *disp_dlg_regs,
+ display_ttu_regs_st *disp_ttu_regs,
+ const display_rq_dlg_params_st rq_dlg_param,
+ const display_dlg_sys_params_st dlg_sys_param,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool ignore_viewport_pos,
+ const bool immediate_flip_support)
+{
+ const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src;
+ const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest;
+ const display_output_params_st *dout = &e2e_pipe_param[pipe_idx].dout;
+ const display_clocks_and_cfg_st *clks = &e2e_pipe_param[pipe_idx].clks_cfg;
+ const scaler_ratio_depth_st *scl = &e2e_pipe_param[pipe_idx].pipe.scale_ratio_depth;
+ const scaler_taps_st *taps = &e2e_pipe_param[pipe_idx].pipe.scale_taps;
+
+ // -------------------------
+ // Section 1.15.2.1: OTG dependent Params
+ // -------------------------
+ // Timing
+ unsigned int htotal = dst->htotal;
+ // unsigned int hblank_start = dst.hblank_start; // TODO: Remove
+ unsigned int hblank_end = dst->hblank_end;
+ unsigned int vblank_start = dst->vblank_start;
+ unsigned int vblank_end = dst->vblank_end;
+ unsigned int min_vblank = mode_lib->ip.min_vblank_lines;
+
+ double dppclk_freq_in_mhz = clks->dppclk_mhz;
+ double dispclk_freq_in_mhz = clks->dispclk_mhz;
+ double refclk_freq_in_mhz = clks->refclk_mhz;
+ double pclk_freq_in_mhz = dst->pixel_rate_mhz;
+ bool interlaced = dst->interlaced;
+
+ double ref_freq_to_pix_freq = refclk_freq_in_mhz / pclk_freq_in_mhz;
+
+ double min_dcfclk_mhz = 0;
+ double t_calc_us = 0;
+ double min_ttu_vblank = 0;
+
+ double min_dst_y_ttu_vblank = 0;
+ unsigned int dlg_vblank_start = 0;
+ bool dual_plane = 0;
+ bool mode_422 = 0;
+ unsigned int access_dir = 0;
+ unsigned int vp_height_l = 0;
+ unsigned int vp_width_l = 0;
+ unsigned int vp_height_c = 0;
+ unsigned int vp_width_c = 0;
+
+ // Scaling
+ unsigned int htaps_l = 0;
+ unsigned int htaps_c = 0;
+ double hratio_l = 0;
+ double hratio_c = 0;
+ double vratio_l = 0;
+ double vratio_c = 0;
+ bool scl_enable = 0;
+
+ double line_time_in_us = 0;
+ // double vinit_l;
+ // double vinit_c;
+ // double vinit_bot_l;
+ // double vinit_bot_c;
+
+ // unsigned int swath_height_l;
+ unsigned int swath_width_ub_l = 0;
+ // unsigned int dpte_bytes_per_row_ub_l;
+ unsigned int dpte_groups_per_row_ub_l = 0;
+ // unsigned int meta_pte_bytes_per_frame_ub_l;
+ // unsigned int meta_bytes_per_row_ub_l;
+
+ // unsigned int swath_height_c;
+ unsigned int swath_width_ub_c = 0;
+ // unsigned int dpte_bytes_per_row_ub_c;
+ unsigned int dpte_groups_per_row_ub_c = 0;
+
+ unsigned int meta_chunks_per_row_ub_l = 0;
+ unsigned int meta_chunks_per_row_ub_c = 0;
+ unsigned int vupdate_offset = 0;
+ unsigned int vupdate_width = 0;
+ unsigned int vready_offset = 0;
+
+ unsigned int dppclk_delay_subtotal = 0;
+ unsigned int dispclk_delay_subtotal = 0;
+ unsigned int pixel_rate_delay_subtotal = 0;
+
+ unsigned int vstartup_start = 0;
+ unsigned int dst_x_after_scaler = 0;
+ unsigned int dst_y_after_scaler = 0;
+ double line_wait = 0;
+ double dst_y_prefetch = 0;
+ double dst_y_per_vm_vblank = 0;
+ double dst_y_per_row_vblank = 0;
+ double dst_y_per_vm_flip = 0;
+ double dst_y_per_row_flip = 0;
+ double max_dst_y_per_vm_vblank = 0;
+ double max_dst_y_per_row_vblank = 0;
+ double lsw = 0;
+ double vratio_pre_l = 0;
+ double vratio_pre_c = 0;
+ unsigned int req_per_swath_ub_l = 0;
+ unsigned int req_per_swath_ub_c = 0;
+ unsigned int meta_row_height_l = 0;
+ unsigned int meta_row_height_c = 0;
+ unsigned int swath_width_pixels_ub_l = 0;
+ unsigned int swath_width_pixels_ub_c = 0;
+ unsigned int scaler_rec_in_width_l = 0;
+ unsigned int scaler_rec_in_width_c = 0;
+ unsigned int dpte_row_height_l = 0;
+ unsigned int dpte_row_height_c = 0;
+ double hscale_pixel_rate_l = 0;
+ double hscale_pixel_rate_c = 0;
+ double min_hratio_fact_l = 0;
+ double min_hratio_fact_c = 0;
+ double refcyc_per_line_delivery_pre_l = 0;
+ double refcyc_per_line_delivery_pre_c = 0;
+ double refcyc_per_line_delivery_l = 0;
+ double refcyc_per_line_delivery_c = 0;
+
+ double refcyc_per_req_delivery_pre_l = 0;
+ double refcyc_per_req_delivery_pre_c = 0;
+ double refcyc_per_req_delivery_l = 0;
+ double refcyc_per_req_delivery_c = 0;
+
+ unsigned int full_recout_width = 0;
+ double refcyc_per_req_delivery_pre_cur0 = 0;
+ double refcyc_per_req_delivery_cur0 = 0;
+ double refcyc_per_req_delivery_pre_cur1 = 0;
+ double refcyc_per_req_delivery_cur1 = 0;
+
+ unsigned int pipe_index_in_combine[DC__NUM_PIPES__MAX] = { 0 };
+
+ memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
+ memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
+
+ dml_print("DML_DLG: %s: cstate_en = %d\n", __func__, cstate_en);
+ dml_print("DML_DLG: %s: pstate_en = %d\n", __func__, pstate_en);
+ dml_print("DML_DLG: %s: vm_en = %d\n", __func__, vm_en);
+ dml_print("DML_DLG: %s: ignore_viewport_pos = %d\n", __func__, ignore_viewport_pos);
+ dml_print("DML_DLG: %s: immediate_flip_support = %d\n", __func__, immediate_flip_support);
+
+ dml_print("DML_DLG: %s: dppclk_freq_in_mhz = %3.2f\n", __func__, dppclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: dispclk_freq_in_mhz = %3.2f\n", __func__, dispclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: interlaced = %d\n", __func__, interlaced);
+ ASSERT(ref_freq_to_pix_freq < 4.0);
+
+ disp_dlg_regs->ref_freq_to_pix_freq =
+ (unsigned int)(ref_freq_to_pix_freq * dml_pow(2, 19));
+ disp_dlg_regs->refcyc_per_htotal = (unsigned int)(ref_freq_to_pix_freq * (double)htotal
+ * dml_pow(2, 8));
+ disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; // 15 bits
+
+ min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
+ t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes);
+ min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double)htotal;
+ dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
+
+	disp_dlg_regs->min_dst_y_next_start = (unsigned int)((double)dlg_vblank_start * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18));
+
+ dml_print("DML_DLG: %s: min_dcfclk_mhz = %3.2f\n",
+ __func__,
+ min_dcfclk_mhz);
+ dml_print("DML_DLG: %s: min_ttu_vblank = %3.2f\n",
+ __func__,
+ min_ttu_vblank);
+ dml_print("DML_DLG: %s: min_dst_y_ttu_vblank = %3.2f\n",
+ __func__,
+ min_dst_y_ttu_vblank);
+ dml_print("DML_DLG: %s: t_calc_us = %3.2f\n",
+ __func__,
+ t_calc_us);
+ dml_print("DML_DLG: %s: disp_dlg_regs->min_dst_y_next_start = 0x%0x\n",
+ __func__,
+ disp_dlg_regs->min_dst_y_next_start);
+ dml_print("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n",
+ __func__,
+ ref_freq_to_pix_freq);
+
+ // -------------------------
+ // Section 1.15.2.2: Prefetch, Active and TTU
+ // -------------------------
+ // Prefetch Calc
+ // Source
+ // dcc_en = src.dcc;
+ dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
+ mode_422 = 0; // TODO
+ access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
+ vp_height_l = src->viewport_height;
+ vp_width_l = src->viewport_width;
+ vp_height_c = src->viewport_height_c;
+ vp_width_c = src->viewport_width_c;
+
+ // Scaling
+ htaps_l = taps->htaps;
+ htaps_c = taps->htaps_c;
+ hratio_l = scl->hscl_ratio;
+ hratio_c = scl->hscl_ratio_c;
+ vratio_l = scl->vscl_ratio;
+ vratio_c = scl->vscl_ratio_c;
+ scl_enable = scl->scl_enable;
+
+ line_time_in_us = (htotal / pclk_freq_in_mhz);
+ swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub;
+ dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub;
+ swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub;
+ dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub;
+
+ meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub;
+ meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub;
+ vupdate_offset = dst->vupdate_offset;
+ vupdate_width = dst->vupdate_width;
+ vready_offset = dst->vready_offset;
+
+ dppclk_delay_subtotal = mode_lib->ip.dppclk_delay_subtotal;
+ dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal;
+
+ if (scl_enable)
+ dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl;
+ else
+ dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl_lb_only;
+
+ dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_cnvc_formatter
+ + src->num_cursors * mode_lib->ip.dppclk_delay_cnvc_cursor;
+
+ if (dout->dsc_enable) {
+ double dsc_delay = get_dsc_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ dispclk_delay_subtotal += dsc_delay;
+ }
+
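+	// Express the DPPCLK- and DISPCLK-domain pipeline delays in pixel-clock cycles.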
+ pixel_rate_delay_subtotal = dppclk_delay_subtotal * pclk_freq_in_mhz / dppclk_freq_in_mhz
+ + dispclk_delay_subtotal * pclk_freq_in_mhz / dispclk_freq_in_mhz;
+
+ vstartup_start = dst->vstartup_start;
+ if (interlaced) {
+ if (vstartup_start / 2.0
+ - (double)(vready_offset + vupdate_width + vupdate_offset) / htotal
+ <= vblank_end / 2.0)
+ disp_dlg_regs->vready_after_vcount0 = 1;
+ else
+ disp_dlg_regs->vready_after_vcount0 = 0;
+ } else {
+ if (vstartup_start
+ - (double)(vready_offset + vupdate_width + vupdate_offset) / htotal
+ <= vblank_end)
+ disp_dlg_regs->vready_after_vcount0 = 1;
+ else
+ disp_dlg_regs->vready_after_vcount0 = 0;
+ }
+
+ // TODO: Where is this coming from?
+ if (interlaced)
+ vstartup_start = vstartup_start / 2;
+
+ // TODO: What if this min_vblank doesn't match the value in the dml_config_settings.cpp?
+ if (vstartup_start >= min_vblank) {
+ dml_print("WARNING: DML_DLG: %s: vblank_start=%d vblank_end=%d\n",
+ __func__,
+ vblank_start,
+ vblank_end);
+ dml_print("WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n",
+ __func__,
+ vstartup_start,
+ min_vblank);
+		min_vblank = vstartup_start + 1;
+		dml_print("WARNING: DML_DLG: %s: vstartup_start=%d, min_vblank adjusted to %d\n",
+			__func__,
+			vstartup_start,
+			min_vblank);
+ }
+
+ dst_x_after_scaler = get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ dst_y_after_scaler = get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ // do some adjustment on the dst_after scaler to account for odm combine mode
+ dml_print("DML_DLG: %s: input dst_x_after_scaler = %d\n",
+ __func__,
+ dst_x_after_scaler);
+ dml_print("DML_DLG: %s: input dst_y_after_scaler = %d\n",
+ __func__,
+ dst_y_after_scaler);
+
+ // need to figure out which side of odm combine we're in
+ if (dst->odm_combine) {
+ // figure out which pipes go together
+ bool visited[DC__NUM_PIPES__MAX] = { false };
+ unsigned int i, j, k;
+
+ for (k = 0; k < num_pipes; ++k) {
+ visited[k] = false;
+ pipe_index_in_combine[k] = 0;
+ }
+
+ for (i = 0; i < num_pipes; i++) {
+ if (e2e_pipe_param[i].pipe.src.is_hsplit && !visited[i]) {
+
+ unsigned int grp = e2e_pipe_param[i].pipe.src.hsplit_grp;
+ unsigned int grp_idx = 0;
+
+ for (j = i; j < num_pipes; j++) {
+ if (e2e_pipe_param[j].pipe.src.hsplit_grp == grp
+ && e2e_pipe_param[j].pipe.src.is_hsplit && !visited[j]) {
+ pipe_index_in_combine[j] = grp_idx;
+ dml_print("DML_DLG: %s: pipe[%d] is in grp %d idx %d\n", __func__, j, grp, grp_idx);
+ grp_idx++;
+ visited[j] = true;
+ }
+ }
+ }
+ }
+
+ }
+
+ if (dst->odm_combine == dm_odm_combine_mode_disabled) {
+ disp_dlg_regs->refcyc_h_blank_end = (unsigned int)((double) hblank_end * ref_freq_to_pix_freq);
+ } else {
+ unsigned int odm_combine_factor = (dst->odm_combine == dm_odm_combine_mode_2to1 ? 2 : 4); // TODO: We should really check that 4to1 is supported before setting it to 4
+ unsigned int odm_pipe_index = pipe_index_in_combine[pipe_idx];
+ disp_dlg_regs->refcyc_h_blank_end = (unsigned int)(((double) hblank_end + odm_pipe_index * (double) dst->hactive / odm_combine_factor) * ref_freq_to_pix_freq);
+ }
+ ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int)dml_pow(2, 13));
+
+ dml_print("DML_DLG: %s: htotal = %d\n", __func__, htotal);
+ dml_print("DML_DLG: %s: pixel_rate_delay_subtotal = %d\n",
+ __func__,
+ pixel_rate_delay_subtotal);
+ dml_print("DML_DLG: %s: dst_x_after_scaler[%d] = %d\n",
+ __func__,
+ pipe_idx,
+ dst_x_after_scaler);
+ dml_print("DML_DLG: %s: dst_y_after_scaler[%d] = %d\n",
+ __func__,
+ pipe_idx,
+ dst_y_after_scaler);
+
+ // Lwait
+ // TODO: Should this be urgent_latency_pixel_mixed_with_vm_data_us?
+ line_wait = mode_lib->soc.urgent_latency_pixel_data_only_us;
+ if (cstate_en)
+ line_wait = dml_max(mode_lib->soc.sr_enter_plus_exit_time_us, line_wait);
+ if (pstate_en)
+ line_wait = dml_max(mode_lib->soc.dram_clock_change_latency_us
+ + mode_lib->soc.urgent_latency_pixel_data_only_us, // TODO: Should this be urgent_latency_pixel_mixed_with_vm_data_us?
+ line_wait);
+ line_wait = line_wait / line_time_in_us;
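+	// line_wait is now expressed in display lines rather than microseconds.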
+
+ dst_y_prefetch = get_dst_y_prefetch(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ dml_print("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, dst_y_prefetch);
+
+ dst_y_per_vm_vblank = get_dst_y_per_vm_vblank(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+ dst_y_per_row_vblank = get_dst_y_per_row_vblank(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+ dst_y_per_vm_flip = get_dst_y_per_vm_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ dst_y_per_row_flip = get_dst_y_per_row_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ max_dst_y_per_vm_vblank = 32.0; //U5.2
+ max_dst_y_per_row_vblank = 16.0; //U4.2
+
+ // magic!
+ if (htotal <= 75) {
+ min_vblank = 300;
+ max_dst_y_per_vm_vblank = 100.0;
+ max_dst_y_per_row_vblank = 100.0;
+ }
+
+ dml_print("DML_DLG: %s: dst_y_per_vm_flip = %3.2f\n", __func__, dst_y_per_vm_flip);
+ dml_print("DML_DLG: %s: dst_y_per_row_flip = %3.2f\n", __func__, dst_y_per_row_flip);
+ dml_print("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, dst_y_per_vm_vblank);
+ dml_print("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, dst_y_per_row_vblank);
+
+ ASSERT(dst_y_per_vm_vblank < max_dst_y_per_vm_vblank);
+ ASSERT(dst_y_per_row_vblank < max_dst_y_per_row_vblank);
+
+ ASSERT(dst_y_prefetch > (dst_y_per_vm_vblank + dst_y_per_row_vblank));
+ lsw = dst_y_prefetch - (dst_y_per_vm_vblank + dst_y_per_row_vblank);
+
+ dml_print("DML_DLG: %s: lsw = %3.2f\n", __func__, lsw);
+
+ vratio_pre_l = get_vratio_prefetch_l(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ vratio_pre_c = get_vratio_prefetch_c(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ dml_print("DML_DLG: %s: vratio_pre_l=%3.2f\n", __func__, vratio_pre_l);
+ dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c);
+
+ // Active
+ req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub;
+ req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub;
+ meta_row_height_l = rq_dlg_param.rq_l.meta_row_height;
+ meta_row_height_c = rq_dlg_param.rq_c.meta_row_height;
+ swath_width_pixels_ub_l = 0;
+ swath_width_pixels_ub_c = 0;
+ scaler_rec_in_width_l = 0;
+ scaler_rec_in_width_c = 0;
+ dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height;
+ dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height;
+
+ if (mode_422) {
+ swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
+ swath_width_pixels_ub_c = swath_width_ub_c * 2;
+ } else {
+ swath_width_pixels_ub_l = swath_width_ub_l * 1;
+ swath_width_pixels_ub_c = swath_width_ub_c * 1;
+ }
+
+ hscale_pixel_rate_l = 0.;
+ hscale_pixel_rate_c = 0.;
+ min_hratio_fact_l = 1.0;
+ min_hratio_fact_c = 1.0;
+
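+	// Horizontal scaler source-pixel rate used for delivery timing: 2 pixels per
+	// DPPCLK cycle when hratio <= 1, otherwise hratio (doubled for 6 or fewer
+	// taps), capped at 4 pixels per DPPCLK cycle.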
+ if (hratio_l <= 1)
+ min_hratio_fact_l = 2.0;
+ else if (htaps_l <= 6) {
+ if ((hratio_l * 2.0) > 4.0)
+ min_hratio_fact_l = 4.0;
+ else
+ min_hratio_fact_l = hratio_l * 2.0;
+ } else {
+ if (hratio_l > 4.0)
+ min_hratio_fact_l = 4.0;
+ else
+ min_hratio_fact_l = hratio_l;
+ }
+
+ hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz;
+
+ if (hratio_c <= 1)
+ min_hratio_fact_c = 2.0;
+ else if (htaps_c <= 6) {
+ if ((hratio_c * 2.0) > 4.0)
+ min_hratio_fact_c = 4.0;
+ else
+ min_hratio_fact_c = hratio_c * 2.0;
+ } else {
+ if (hratio_c > 4.0)
+ min_hratio_fact_c = 4.0;
+ else
+ min_hratio_fact_c = hratio_c;
+ }
+
+ hscale_pixel_rate_c = min_hratio_fact_c * dppclk_freq_in_mhz;
+
+ refcyc_per_line_delivery_pre_l = 0.;
+ refcyc_per_line_delivery_pre_c = 0.;
+ refcyc_per_line_delivery_l = 0.;
+ refcyc_per_line_delivery_c = 0.;
+
+ refcyc_per_req_delivery_pre_l = 0.;
+ refcyc_per_req_delivery_pre_c = 0.;
+ refcyc_per_req_delivery_l = 0.;
+ refcyc_per_req_delivery_c = 0.;
+
+ full_recout_width = 0;
+ // In ODM
+ if (src->is_hsplit) {
+ // This "hack" is only allowed (and valid) for MPC combine. In ODM
+ // combine, you MUST specify the full_recout_width...according to Oswin
+ if (dst->full_recout_width == 0 && !dst->odm_combine) {
+ dml_print("DML_DLG: %s: Warning: full_recout_width not set in hsplit mode\n",
+ __func__);
+ full_recout_width = dst->recout_width * 2; // assume half split for dcn1
+ } else
+ full_recout_width = dst->full_recout_width;
+ } else
+ full_recout_width = dst->recout_width;
+
+ // As of DCN2, mpc_combine and odm_combine are mutually exclusive
+ refcyc_per_line_delivery_pre_l = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_pre_l,
+ hscale_pixel_rate_l,
+ swath_width_pixels_ub_l,
+ 1); // per line
+
+ refcyc_per_line_delivery_l = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_l,
+ hscale_pixel_rate_l,
+ swath_width_pixels_ub_l,
+ 1); // per line
+
+ dml_print("DML_DLG: %s: full_recout_width = %d\n",
+ __func__,
+ full_recout_width);
+ dml_print("DML_DLG: %s: hscale_pixel_rate_l = %3.2f\n",
+ __func__,
+ hscale_pixel_rate_l);
+ dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n",
+ __func__,
+ refcyc_per_line_delivery_pre_l);
+ dml_print("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n",
+ __func__,
+ refcyc_per_line_delivery_l);
+
+ if (dual_plane) {
+ refcyc_per_line_delivery_pre_c = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_pre_c,
+ hscale_pixel_rate_c,
+ swath_width_pixels_ub_c,
+ 1); // per line
+
+ refcyc_per_line_delivery_c = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_c,
+ hscale_pixel_rate_c,
+ swath_width_pixels_ub_c,
+ 1); // per line
+
+ dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n",
+ __func__,
+ refcyc_per_line_delivery_pre_c);
+ dml_print("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n",
+ __func__,
+ refcyc_per_line_delivery_c);
+ }
+
+ // smehta: this is a hack added until we get the real dml, sorry, need to make progress
+ if (src->dynamic_metadata_enable && src->gpuvm) {
+ unsigned int levels = mode_lib->ip.gpuvm_max_page_table_levels;
+ double ref_cycles;
+
+ if (src->hostvm)
+ levels = levels * (mode_lib->ip.hostvm_max_page_table_levels+1);
+
+ ref_cycles = (levels * mode_lib->soc.urgent_latency_vm_data_only_us) * refclk_freq_in_mhz;
+		dml_print("DML_DLG: %s: refcyc_per_vm_dmdata: ref_cycles = %f levels = %u urgent_latency_vm_data_only_us = %f refclk_freq_in_mhz = %f\n",
+				__func__, ref_cycles, levels, mode_lib->soc.urgent_latency_vm_data_only_us, refclk_freq_in_mhz);
+ disp_dlg_regs->refcyc_per_vm_dmdata = (unsigned int) ref_cycles;
+ }
+	dml_print("DML_DLG: %s: dynamic_metadata_enable = %d vm = %d\n",
+			__func__, src->dynamic_metadata_enable, src->vm);
+ // TTU - Luma / Chroma
+ if (access_dir) { // vertical access
+ scaler_rec_in_width_l = vp_height_l;
+ scaler_rec_in_width_c = vp_height_c;
+ } else {
+ scaler_rec_in_width_l = vp_width_l;
+ scaler_rec_in_width_c = vp_width_c;
+ }
+
+ refcyc_per_req_delivery_pre_l = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_pre_l,
+ hscale_pixel_rate_l,
+ scaler_rec_in_width_l,
+ req_per_swath_ub_l); // per req
+ refcyc_per_req_delivery_l = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_l,
+ hscale_pixel_rate_l,
+ scaler_rec_in_width_l,
+ req_per_swath_ub_l); // per req
+
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n",
+ __func__,
+ refcyc_per_req_delivery_pre_l);
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n",
+ __func__,
+ refcyc_per_req_delivery_l);
+
+ ASSERT(refcyc_per_req_delivery_pre_l < dml_pow(2, 13));
+ ASSERT(refcyc_per_req_delivery_l < dml_pow(2, 13));
+
+ if (dual_plane) {
+ refcyc_per_req_delivery_pre_c = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_pre_c,
+ hscale_pixel_rate_c,
+ scaler_rec_in_width_c,
+ req_per_swath_ub_c); // per req
+ refcyc_per_req_delivery_c = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_c,
+ hscale_pixel_rate_c,
+ scaler_rec_in_width_c,
+ req_per_swath_ub_c); // per req
+
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n",
+ __func__,
+ refcyc_per_req_delivery_pre_c);
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n",
+ __func__,
+ refcyc_per_req_delivery_c);
+
+ ASSERT(refcyc_per_req_delivery_pre_c < dml_pow(2, 13));
+ ASSERT(refcyc_per_req_delivery_c < dml_pow(2, 13));
+ }
+
+ // TTU - Cursor
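+ // Cursor delivery times default to 0 and are computed only for enabled cursor planes.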
+ refcyc_per_req_delivery_pre_cur0 = 0.0;
+ refcyc_per_req_delivery_cur0 = 0.0;
+ if (src->num_cursors > 0) {
+ calculate_ttu_cursor(mode_lib,
+ &refcyc_per_req_delivery_pre_cur0,
+ &refcyc_per_req_delivery_cur0,
+ refclk_freq_in_mhz,
+ ref_freq_to_pix_freq,
+ hscale_pixel_rate_l,
+ scl->hscl_ratio,
+ vratio_pre_l,
+ vratio_l,
+ src->cur0_src_width,
+ (enum cursor_bpp)(src->cur0_bpp));
+ }
+
+ refcyc_per_req_delivery_pre_cur1 = 0.0;
+ refcyc_per_req_delivery_cur1 = 0.0;
+ if (src->num_cursors > 1) {
+ calculate_ttu_cursor(mode_lib,
+ &refcyc_per_req_delivery_pre_cur1,
+ &refcyc_per_req_delivery_cur1,
+ refclk_freq_in_mhz,
+ ref_freq_to_pix_freq,
+ hscale_pixel_rate_l,
+ scl->hscl_ratio,
+ vratio_pre_l,
+ vratio_l,
+ src->cur1_src_width,
+ (enum cursor_bpp)(src->cur1_bpp));
+ }
+
+ // TTU - Misc
+ // all hard-coded
+
+ // Assignment to register structures
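+ // dst_y_* values are scaled by 2^2 and vratio_prefetch* by 2^19 to match the registers' fixed-point formats.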
+ disp_dlg_regs->dst_y_after_scaler = dst_y_after_scaler; // in terms of line
+ ASSERT(disp_dlg_regs->dst_y_after_scaler < (unsigned int)8);
+ disp_dlg_regs->refcyc_x_after_scaler = dst_x_after_scaler * ref_freq_to_pix_freq; // in terms of refclk
+ ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int)dml_pow(2, 13));
+ disp_dlg_regs->dst_y_prefetch = (unsigned int)(dst_y_prefetch * dml_pow(2, 2));
+ disp_dlg_regs->dst_y_per_vm_vblank = (unsigned int)(dst_y_per_vm_vblank * dml_pow(2, 2));
+ disp_dlg_regs->dst_y_per_row_vblank = (unsigned int)(dst_y_per_row_vblank * dml_pow(2, 2));
+ disp_dlg_regs->dst_y_per_vm_flip = (unsigned int)(dst_y_per_vm_flip * dml_pow(2, 2));
+ disp_dlg_regs->dst_y_per_row_flip = (unsigned int)(dst_y_per_row_flip * dml_pow(2, 2));
+
+ disp_dlg_regs->vratio_prefetch = (unsigned int)(vratio_pre_l * dml_pow(2, 19));
+ disp_dlg_regs->vratio_prefetch_c = (unsigned int)(vratio_pre_c * dml_pow(2, 19));
+
+ dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_vblank);
+ dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_vblank = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_vblank);
+ dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_vm_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_vm_flip);
+ dml_print("DML_DLG: %s: disp_dlg_regs->dst_y_per_row_flip = 0x%x\n", __func__, disp_dlg_regs->dst_y_per_row_flip);
+ disp_dlg_regs->refcyc_per_pte_group_vblank_l =
+ (unsigned int)(dst_y_per_row_vblank * (double)htotal
+ * ref_freq_to_pix_freq / (double)dpte_groups_per_row_ub_l);
+ ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13));
+
+ if (dual_plane) {
+ disp_dlg_regs->refcyc_per_pte_group_vblank_c = (unsigned int)(dst_y_per_row_vblank
+ * (double)htotal * ref_freq_to_pix_freq
+ / (double)dpte_groups_per_row_ub_c);
+ ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c
+ < (unsigned int)dml_pow(2, 13));
+ }
+
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
+ (unsigned int)(dst_y_per_row_vblank * (double)htotal
+ * ref_freq_to_pix_freq / (double)meta_chunks_per_row_ub_l);
+ ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int)dml_pow(2, 13));
+
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_c =
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_l; // dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now
+
+ disp_dlg_regs->refcyc_per_pte_group_flip_l = (unsigned int)(dst_y_per_row_flip * htotal
+ * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_l;
+ disp_dlg_regs->refcyc_per_meta_chunk_flip_l = (unsigned int)(dst_y_per_row_flip * htotal
+ * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_l;
+
+ if (dual_plane) {
+ disp_dlg_regs->refcyc_per_pte_group_flip_c = (unsigned int)(dst_y_per_row_flip
+ * htotal * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_c;
+ disp_dlg_regs->refcyc_per_meta_chunk_flip_c = (unsigned int)(dst_y_per_row_flip
+ * htotal * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_c;
+ }
+
+ disp_dlg_regs->refcyc_per_vm_group_vblank = get_refcyc_per_vm_group_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
+ disp_dlg_regs->refcyc_per_vm_group_flip = get_refcyc_per_vm_group_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
+ disp_dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10);
+ disp_dlg_regs->refcyc_per_vm_req_flip = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10);
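+ // refcyc_per_vm_req_* are scaled by 2^10 to match their fixed-point register format;
+ // refcyc_per_vm_group_* are programmed as integer refclk cycles.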
+
+ // Clamp to max for now
+ if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_vm_group_vblank = dml_pow(2, 23) - 1;
+
+ if (disp_dlg_regs->refcyc_per_vm_group_flip >= (unsigned int)dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_vm_group_flip = dml_pow(2, 23) - 1;
+
+ if (disp_dlg_regs->refcyc_per_vm_req_vblank >= (unsigned int)dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_vm_req_vblank = dml_pow(2, 23) - 1;
+
+ if (disp_dlg_regs->refcyc_per_vm_req_flip >= (unsigned int)dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_vm_req_flip = dml_pow(2, 23) - 1;
+
+ disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int)((double)dpte_row_height_l
+ / (double)vratio_l * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->dst_y_per_pte_row_nom_l < (unsigned int)dml_pow(2, 17));
+
+ if (dual_plane) {
+ disp_dlg_regs->dst_y_per_pte_row_nom_c = (unsigned int)((double)dpte_row_height_c
+ / (double)vratio_c * dml_pow(2, 2));
+ if (disp_dlg_regs->dst_y_per_pte_row_nom_c >= (unsigned int)dml_pow(2, 17)) {
+ dml_print("DML_DLG: %s: Warning dst_y_per_pte_row_nom_c %u larger than supported by register format U15.2 %u\n",
+ __func__,
+ disp_dlg_regs->dst_y_per_pte_row_nom_c,
+ (unsigned int)dml_pow(2, 17) - 1);
+ }
+ }
+
+ disp_dlg_regs->dst_y_per_meta_row_nom_l = (unsigned int)((double)meta_row_height_l
+ / (double)vratio_l * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->dst_y_per_meta_row_nom_l < (unsigned int)dml_pow(2, 17));
+
+ disp_dlg_regs->dst_y_per_meta_row_nom_c = disp_dlg_regs->dst_y_per_meta_row_nom_l; // TODO: dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now
+
+ dml_print("DML: Trow: %fus\n", line_time_in_us * (double)dpte_row_height_l / (double)vratio_l);
+
+ disp_dlg_regs->refcyc_per_pte_group_nom_l = (unsigned int)((double)dpte_row_height_l
+ / (double)vratio_l * (double)htotal * ref_freq_to_pix_freq
+ / (double)dpte_groups_per_row_ub_l);
+ if (disp_dlg_regs->refcyc_per_pte_group_nom_l >= (unsigned int)dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_pte_group_nom_l = dml_pow(2, 23) - 1;
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_l = (unsigned int)((double)meta_row_height_l
+ / (double)vratio_l * (double)htotal * ref_freq_to_pix_freq
+ / (double)meta_chunks_per_row_ub_l);
+ if (disp_dlg_regs->refcyc_per_meta_chunk_nom_l >= (unsigned int)dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_l = dml_pow(2, 23) - 1;
+
+ if (dual_plane) {
+ disp_dlg_regs->refcyc_per_pte_group_nom_c =
+ (unsigned int)((double)dpte_row_height_c / (double)vratio_c
+ * (double)htotal * ref_freq_to_pix_freq
+ / (double)dpte_groups_per_row_ub_c);
+ if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int)dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_pte_group_nom_c = dml_pow(2, 23) - 1;
+
+ // TODO: Is this the right calculation? Does htotal need to be halved?
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_c =
+ (unsigned int)((double)meta_row_height_c / (double)vratio_c
+ * (double)htotal * ref_freq_to_pix_freq
+ / (double)meta_chunks_per_row_ub_c);
+ if (disp_dlg_regs->refcyc_per_meta_chunk_nom_c >= (unsigned int)dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_c = dml_pow(2, 23) - 1;
+ }
+
+ disp_dlg_regs->refcyc_per_line_delivery_pre_l = (unsigned int)dml_floor(refcyc_per_line_delivery_pre_l,
+ 1);
+ disp_dlg_regs->refcyc_per_line_delivery_l = (unsigned int)dml_floor(refcyc_per_line_delivery_l,
+ 1);
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int)dml_pow(2, 13));
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int)dml_pow(2, 13));
+
+ disp_dlg_regs->refcyc_per_line_delivery_pre_c = (unsigned int)dml_floor(refcyc_per_line_delivery_pre_c,
+ 1);
+ disp_dlg_regs->refcyc_per_line_delivery_c = (unsigned int)dml_floor(refcyc_per_line_delivery_c,
+ 1);
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int)dml_pow(2, 13));
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int)dml_pow(2, 13));
+
+ disp_dlg_regs->chunk_hdl_adjust_cur0 = 3;
+ disp_dlg_regs->dst_y_offset_cur0 = 0;
+ disp_dlg_regs->chunk_hdl_adjust_cur1 = 3;
+ disp_dlg_regs->dst_y_offset_cur1 = 0;
+
+ disp_dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off
+
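+ // TTU request-delivery times are scaled by 2^10 to match the registers' fixed-point format.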
+ disp_ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int)(refcyc_per_req_delivery_pre_l
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_l = (unsigned int)(refcyc_per_req_delivery_l
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_pre_c = (unsigned int)(refcyc_per_req_delivery_pre_c
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_c = (unsigned int)(refcyc_per_req_delivery_c
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_pre_cur0 =
+ (unsigned int)(refcyc_per_req_delivery_pre_cur0 * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_cur0 = (unsigned int)(refcyc_per_req_delivery_cur0
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_pre_cur1 =
+ (unsigned int)(refcyc_per_req_delivery_pre_cur1 * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_cur1 = (unsigned int)(refcyc_per_req_delivery_cur1
+ * dml_pow(2, 10));
+ disp_ttu_regs->qos_level_low_wm = 0;
+ ASSERT(disp_ttu_regs->qos_level_low_wm < dml_pow(2, 14));
+ disp_ttu_regs->qos_level_high_wm = (unsigned int)(4.0 * (double)htotal
+ * ref_freq_to_pix_freq);
+ ASSERT(disp_ttu_regs->qos_level_high_wm < dml_pow(2, 14));
+
+ disp_ttu_regs->qos_level_flip = 14;
+ disp_ttu_regs->qos_level_fixed_l = 8;
+ disp_ttu_regs->qos_level_fixed_c = 8;
+ disp_ttu_regs->qos_level_fixed_cur0 = 8;
+ disp_ttu_regs->qos_ramp_disable_l = 0;
+ disp_ttu_regs->qos_ramp_disable_c = 0;
+ disp_ttu_regs->qos_ramp_disable_cur0 = 0;
+
+ disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
+ ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
+
+ print__ttu_regs_st(mode_lib, *disp_ttu_regs);
+ print__dlg_regs_st(mode_lib, *disp_dlg_regs);
+}
+
+void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
+ display_dlg_regs_st *dlg_regs,
+ display_ttu_regs_st *ttu_regs,
+ display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool ignore_viewport_pos,
+ const bool immediate_flip_support)
+{
+ display_rq_params_st rq_param = { 0 };
+ display_dlg_sys_params_st dlg_sys_param = { 0 };
+
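+ // Gather system-level watermarks and latencies, then derive per-pipe RQ parameters and the DLG/TTU registers.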
+ // Get watermarks and Tex (urgent extra latency).
+ dlg_sys_param.t_urg_wm_us = get_wm_urgent(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.deepsleep_dcfclk_mhz = get_clk_dcf_deepsleep(mode_lib,
+ e2e_pipe_param,
+ num_pipes);
+ dlg_sys_param.t_extra_us = get_urgent_extra_latency(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.mem_trip_us = get_wm_memory_trip(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.t_mclk_wm_us = get_wm_dram_clock_change(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.t_sr_wm_us = get_wm_stutter_enter_exit(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.total_flip_bw = get_total_immediate_flip_bw(mode_lib,
+ e2e_pipe_param,
+ num_pipes);
+ dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib,
+ e2e_pipe_param,
+ num_pipes);
+ dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
+ / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
+
+ print__dlg_sys_params_st(mode_lib, dlg_sys_param);
+
+ // system parameter calculation done
+
+ dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
+ dml_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe);
+ dml_rq_dlg_get_dlg_params(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx,
+ dlg_regs,
+ ttu_regs,
+ rq_param.dlg,
+ dlg_sys_param,
+ cstate_en,
+ pstate_en,
+ vm_en,
+ ignore_viewport_pos,
+ immediate_flip_support);
+ dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h
new file mode 100644
index 000000000000..e5b17e1104c6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DML30_DISPLAY_RQ_DLG_CALC_H__
+#define __DML30_DISPLAY_RQ_DLG_CALC_H__
+
+#include "../display_rq_dlg_helpers.h"
+
+struct display_mode_lib;
+
+// Function: dml30_rq_dlg_get_rq_reg
+// Main entry point for tests to get the register values out of this DML class.
+// This function calls <get_rq_param> and <extract_rq_regs> to calculate
+// and then populate the rq_regs struct.
+// Input:
+// pipe_param - pipe source configuration (e.g. vp, pitch, scaling, dest, etc.)
+// Output:
+// rq_regs - struct that holds all the RQ register field values.
+// See also: <display_rq_regs_st>
+void dml30_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
+ display_rq_regs_st *rq_regs,
+ const display_pipe_params_st pipe_param);
+
+// Function: dml30_rq_dlg_get_dlg_reg
+// Calculate and return the DLG and TTU register structs given the system settings.
+// Output:
+// dlg_regs - output DLG register struct
+// ttu_regs - output DLG TTU register struct
+// Input:
+// e2e_pipe_param - "compacted" array of e2e pipe param struct
+// num_pipes - num of active "pipe" or "route"
+// pipe_idx - index that identifies the e2e_pipe_param corresponding to this dlg
+// cstate - 0: assume cstate is not required when calculating min_ttu_vblank; 1: normal mode, cstate is considered.
+// Added for legacy or unrealistic timing tests.
+void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
+ display_dlg_regs_st *dlg_regs,
+ display_ttu_regs_st *ttu_regs,
+ display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool ignore_viewport_pos,
+ const bool immediate_flip_support);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
index bfc2f39bd1ef..64f9c735f74d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
@@ -181,4 +181,10 @@ enum immediate_flip_requirement {
dm_immediate_flip_required,
};
+enum unbounded_requesting_policy {
+ dm_unbounded_requesting,
+ dm_unbounded_requesting_edp_only,
+ dm_unbounded_requesting_disable
+};
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index 2689401a03a3..950ba04d7503 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -31,6 +31,11 @@
#include "dcn20/display_rq_dlg_calc_20v2.h"
#include "dcn21/display_mode_vba_21.h"
#include "dcn21/display_rq_dlg_calc_21.h"
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+#include "dcn30/display_mode_vba_30.h"
+#include "dcn30/display_rq_dlg_calc_30.h"
+#include "dml_logger.h"
+#endif
const struct dml_funcs dml20_funcs = {
.validate = dml20_ModeSupportAndSystemConfigurationFull,
@@ -53,6 +58,14 @@ const struct dml_funcs dml21_funcs = {
.rq_dlg_get_rq_reg = dml21_rq_dlg_get_rq_reg
};
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+const struct dml_funcs dml30_funcs = {
+ .validate = dml30_ModeSupportAndSystemConfigurationFull,
+ .recalculate = dml30_recalculate,
+ .rq_dlg_get_dlg_reg = dml30_rq_dlg_get_dlg_reg,
+ .rq_dlg_get_rq_reg = dml30_rq_dlg_get_rq_reg
+};
+#endif
void dml_init_instance(struct display_mode_lib *lib,
const struct _vcs_dpi_soc_bounding_box_st *soc_bb,
const struct _vcs_dpi_ip_params_st *ip_params,
@@ -71,6 +84,11 @@ void dml_init_instance(struct display_mode_lib *lib,
case DML_PROJECT_DCN21:
lib->funcs = dml21_funcs;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case DML_PROJECT_DCN30:
+ lib->funcs = dml30_funcs;
+ break;
+#endif
default:
break;
@@ -105,3 +123,166 @@ const char *dml_get_status_message(enum dm_validation_status status)
default: return "Unknown Status";
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+void dml_log_pipe_params(
+ struct display_mode_lib *mode_lib,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt)
+{
+ display_pipe_source_params_st *pipe_src;
+ display_pipe_dest_params_st *pipe_dest;
+ scaler_ratio_depth_st *scale_ratio_depth;
+ scaler_taps_st *scale_taps;
+ display_output_params_st *dout;
+ display_clocks_and_cfg_st *clks_cfg;
+ int i;
+
+ for (i = 0; i < pipe_cnt; i++) {
+ pipe_src = &(pipes[i].pipe.src);
+ pipe_dest = &(pipes[i].pipe.dest);
+ scale_ratio_depth = &(pipes[i].pipe.scale_ratio_depth);
+ scale_taps = &(pipes[i].pipe.scale_taps);
+ dout = &(pipes[i].dout);
+ clks_cfg = &(pipes[i].clks_cfg);
+
+ dml_print("DML PARAMS: =====================================\n");
+ dml_print("DML PARAMS: PIPE [%d] SOURCE PARAMS:\n", i);
+ dml_print("DML PARAMS: source_format = %d\n", pipe_src->source_format);
+ dml_print("DML PARAMS: dcc = %d\n", pipe_src->dcc);
+ dml_print("DML PARAMS: dcc_rate = %d\n", pipe_src->dcc_rate);
+ dml_print("DML PARAMS: dcc_use_global = %d\n", pipe_src->dcc_use_global);
+ dml_print("DML PARAMS: vm = %d\n", pipe_src->vm);
+ dml_print("DML PARAMS: gpuvm = %d\n", pipe_src->gpuvm);
+ dml_print("DML PARAMS: hostvm = %d\n", pipe_src->hostvm);
+ dml_print("DML PARAMS: gpuvm_levels_force_en = %d\n", pipe_src->gpuvm_levels_force_en);
+ dml_print("DML PARAMS: gpuvm_levels_force = %d\n", pipe_src->gpuvm_levels_force);
+ dml_print("DML PARAMS: source_scan = %d\n", pipe_src->source_scan);
+ dml_print("DML PARAMS: sw_mode = %d\n", pipe_src->sw_mode);
+ dml_print("DML PARAMS: macro_tile_size = %d\n", pipe_src->macro_tile_size);
+ dml_print("DML PARAMS: viewport_width = %d\n", pipe_src->viewport_width);
+ dml_print("DML PARAMS: viewport_height = %d\n", pipe_src->viewport_height);
+ dml_print("DML PARAMS: viewport_y_y = %d\n", pipe_src->viewport_y_y);
+ dml_print("DML PARAMS: viewport_y_c = %d\n", pipe_src->viewport_y_c);
+ dml_print("DML PARAMS: viewport_width_c = %d\n", pipe_src->viewport_width_c);
+ dml_print("DML PARAMS: viewport_height_c = %d\n", pipe_src->viewport_height_c);
+ dml_print("DML PARAMS: data_pitch = %d\n", pipe_src->data_pitch);
+ dml_print("DML PARAMS: data_pitch_c = %d\n", pipe_src->data_pitch_c);
+ dml_print("DML PARAMS: meta_pitch = %d\n", pipe_src->meta_pitch);
+ dml_print("DML PARAMS: meta_pitch_c = %d\n", pipe_src->meta_pitch_c);
+ dml_print("DML PARAMS: cur0_src_width = %d\n", pipe_src->cur0_src_width);
+ dml_print("DML PARAMS: cur0_bpp = %d\n", pipe_src->cur0_bpp);
+ dml_print("DML PARAMS: cur1_src_width = %d\n", pipe_src->cur1_src_width);
+ dml_print("DML PARAMS: cur1_bpp = %d\n", pipe_src->cur1_bpp);
+ dml_print("DML PARAMS: num_cursors = %d\n", pipe_src->num_cursors);
+ dml_print("DML PARAMS: is_hsplit = %d\n", pipe_src->is_hsplit);
+ dml_print("DML PARAMS: hsplit_grp = %d\n", pipe_src->hsplit_grp);
+ dml_print("DML PARAMS: dynamic_metadata_enable = %d\n", pipe_src->dynamic_metadata_enable);
+ dml_print("DML PARAMS: dmdata_lines_before_active = %d\n", pipe_src->dynamic_metadata_lines_before_active);
+ dml_print("DML PARAMS: dmdata_xmit_bytes = %d\n", pipe_src->dynamic_metadata_xmit_bytes);
+ dml_print("DML PARAMS: immediate_flip = %d\n", pipe_src->immediate_flip);
+ dml_print("DML PARAMS: v_total_min = %d\n", pipe_src->v_total_min);
+ dml_print("DML PARAMS: v_total_max = %d\n", pipe_src->v_total_max);
+ dml_print("DML PARAMS: =====================================\n");
+
+ dml_print("DML PARAMS: PIPE [%d] DESTINATION PARAMS:\n", i);
+ dml_print("DML PARAMS: recout_width = %d\n", pipe_dest->recout_width);
+ dml_print("DML PARAMS: recout_height = %d\n", pipe_dest->recout_height);
+ dml_print("DML PARAMS: full_recout_width = %d\n", pipe_dest->full_recout_width);
+ dml_print("DML PARAMS: full_recout_height = %d\n", pipe_dest->full_recout_height);
+ dml_print("DML PARAMS: hblank_start = %d\n", pipe_dest->hblank_start);
+ dml_print("DML PARAMS: hblank_end = %d\n", pipe_dest->hblank_end);
+ dml_print("DML PARAMS: vblank_start = %d\n", pipe_dest->vblank_start);
+ dml_print("DML PARAMS: vblank_end = %d\n", pipe_dest->vblank_end);
+ dml_print("DML PARAMS: htotal = %d\n", pipe_dest->htotal);
+ dml_print("DML PARAMS: vtotal = %d\n", pipe_dest->vtotal);
+ dml_print("DML PARAMS: vactive = %d\n", pipe_dest->vactive);
+ dml_print("DML PARAMS: hactive = %d\n", pipe_dest->hactive);
+ dml_print("DML PARAMS: vstartup_start = %d\n", pipe_dest->vstartup_start);
+ dml_print("DML PARAMS: vupdate_offset = %d\n", pipe_dest->vupdate_offset);
+ dml_print("DML PARAMS: vupdate_width = %d\n", pipe_dest->vupdate_width);
+ dml_print("DML PARAMS: vready_offset = %d\n", pipe_dest->vready_offset);
+ dml_print("DML PARAMS: interlaced = %d\n", pipe_dest->interlaced);
+ dml_print("DML PARAMS: pixel_rate_mhz = %3.2f\n", pipe_dest->pixel_rate_mhz);
+ dml_print("DML PARAMS: sync_vblank_all_planes = %d\n", pipe_dest->synchronized_vblank_all_planes);
+ dml_print("DML PARAMS: otg_inst = %d\n", pipe_dest->otg_inst);
+ dml_print("DML PARAMS: odm_combine = %d\n", pipe_dest->odm_combine);
+ dml_print("DML PARAMS: use_maximum_vstartup = %d\n", pipe_dest->use_maximum_vstartup);
+ dml_print("DML PARAMS: vtotal_max = %d\n", pipe_dest->vtotal_max);
+ dml_print("DML PARAMS: vtotal_min = %d\n", pipe_dest->vtotal_min);
+ dml_print("DML PARAMS: =====================================\n");
+
+ dml_print("DML PARAMS: PIPE [%d] SCALER PARAMS:\n", i);
+ dml_print("DML PARAMS: hscl_ratio = %3.4f\n", scale_ratio_depth->hscl_ratio);
+ dml_print("DML PARAMS: vscl_ratio = %3.4f\n", scale_ratio_depth->vscl_ratio);
+ dml_print("DML PARAMS: hscl_ratio_c = %3.4f\n", scale_ratio_depth->hscl_ratio_c);
+ dml_print("DML PARAMS: vscl_ratio_c = %3.4f\n", scale_ratio_depth->vscl_ratio_c);
+ dml_print("DML PARAMS: vinit = %3.4f\n", scale_ratio_depth->vinit);
+ dml_print("DML PARAMS: vinit_c = %3.4f\n", scale_ratio_depth->vinit_c);
+ dml_print("DML PARAMS: vinit_bot = %3.4f\n", scale_ratio_depth->vinit_bot);
+ dml_print("DML PARAMS: vinit_bot_c = %3.4f\n", scale_ratio_depth->vinit_bot_c);
+ dml_print("DML PARAMS: lb_depth = %d\n", scale_ratio_depth->lb_depth);
+ dml_print("DML PARAMS: scl_enable = %d\n", scale_ratio_depth->scl_enable);
+ dml_print("DML PARAMS: htaps = %d\n", scale_taps->htaps);
+ dml_print("DML PARAMS: vtaps = %d\n", scale_taps->vtaps);
+ dml_print("DML PARAMS: htaps_c = %d\n", scale_taps->htaps_c);
+ dml_print("DML PARAMS: vtaps_c = %d\n", scale_taps->vtaps_c);
+ dml_print("DML PARAMS: =====================================\n");
+
+ dml_print("DML PARAMS: PIPE [%d] DISPLAY OUTPUT PARAMS:\n", i);
+ dml_print("DML PARAMS: output_type = %d\n", dout->output_type);
+ dml_print("DML PARAMS: output_format = %d\n", dout->output_format);
+ dml_print("DML PARAMS: output_bpc = %d\n", dout->output_bpc);
+ dml_print("DML PARAMS: output_bpp = %3.4f\n", dout->output_bpp);
+ dml_print("DML PARAMS: dp_lanes = %d\n", dout->dp_lanes);
+ dml_print("DML PARAMS: dsc_enable = %d\n", dout->dsc_enable);
+ dml_print("DML PARAMS: dsc_slices = %d\n", dout->dsc_slices);
+ dml_print("DML PARAMS: wb_enable = %d\n", dout->wb_enable);
+ dml_print("DML PARAMS: num_active_wb = %d\n", dout->num_active_wb);
+ dml_print("DML PARAMS: =====================================\n");
+
+ dml_print("DML PARAMS: PIPE [%d] CLOCK CONFIG PARAMS:\n", i);
+ dml_print("DML PARAMS: voltage = %d\n", clks_cfg->voltage);
+ dml_print("DML PARAMS: dppclk_mhz = %3.2f\n", clks_cfg->dppclk_mhz);
+ dml_print("DML PARAMS: refclk_mhz = %3.2f\n", clks_cfg->refclk_mhz);
+ dml_print("DML PARAMS: dispclk_mhz = %3.2f\n", clks_cfg->dispclk_mhz);
+ dml_print("DML PARAMS: dcfclk_mhz = %3.2f\n", clks_cfg->dcfclk_mhz);
+ dml_print("DML PARAMS: socclk_mhz = %3.2f\n", clks_cfg->socclk_mhz);
+ dml_print("DML PARAMS: =====================================\n");
+ }
+}
+
+void dml_log_mode_support_params(struct display_mode_lib *mode_lib)
+{
+ int i;
+
+ for (i = mode_lib->vba.soc.num_states; i >= 0; i--) {
+ dml_print("DML SUPPORT: ===============================================\n");
+ dml_print("DML SUPPORT: Voltage State %d\n", i);
+ dml_print("DML SUPPORT: Mode Supported : %s\n", mode_lib->vba.ModeSupport[i][0] ? "Supported" : "NOT Supported");
+ dml_print("DML SUPPORT: Mode Supported (pipe split) : %s\n", mode_lib->vba.ModeSupport[i][1] ? "Supported" : "NOT Supported");
+ dml_print("DML SUPPORT: Scale Ratio And Taps : %s\n", mode_lib->vba.ScaleRatioAndTapsSupport ? "Supported" : "NOT Supported");
+ dml_print("DML SUPPORT: Source Format Pixel And Scan : %s\n", mode_lib->vba.SourceFormatPixelAndScanSupport ? "Supported" : "NOT Supported");
+ dml_print("DML SUPPORT: Viewport Size : [%s, %s]\n", mode_lib->vba.ViewportSizeSupport[i][0] ? "Supported" : "NOT Supported", mode_lib->vba.ViewportSizeSupport[i][1] ? "Supported" : "NOT Supported");
+ dml_print("DML SUPPORT: DIO Support : %s\n", mode_lib->vba.DIOSupport[i] ? "Supported" : "NOT Supported");
+ dml_print("DML SUPPORT: ODM Combine 4To1 Support Check : %s\n", mode_lib->vba.ODMCombine4To1SupportCheckOK[i] ? "Supported" : "NOT Supported");
+ dml_print("DML SUPPORT: DSC Units : %s\n", mode_lib->vba.NotEnoughDSCUnits[i] ? "Not Supported" : "Supported");
+ dml_print("DML SUPPORT: DSCCLK Required : %s\n", mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] ? "Not Supported" : "Supported");
+ dml_print("DML SUPPORT: DTBCLK Required : %s\n", mode_lib->vba.DTBCLKRequiredMoreThanSupported[i] ? "Not Supported" : "Supported");
+ dml_print("DML SUPPORT: Re-ordering Buffer : [%s, %s]\n", mode_lib->vba.ROBSupport[i][0] ? "Supported" : "NOT Supported", mode_lib->vba.ROBSupport[i][1] ? "Supported" : "NOT Supported");
+ dml_print("DML SUPPORT: DISPCLK and DPPCLK : [%s, %s]\n", mode_lib->vba.DISPCLK_DPPCLK_Support[i][0] ? "Supported" : "NOT Supported", mode_lib->vba.DISPCLK_DPPCLK_Support[i][1] ? "Supported" : "NOT Supported");
+ dml_print("DML SUPPORT: Total Available Pipes : [%s, %s]\n", mode_lib->vba.TotalAvailablePipesSupport[i][0] ? "Supported" : "NOT Supported", mode_lib->vba.TotalAvailablePipesSupport[i][1] ? "Supported" : "NOT Supported");
+ dml_print("DML SUPPORT: Writeback Latency : %s\n", mode_lib->vba.WritebackLatencySupport ? "Supported" : "NOT Supported");
+ dml_print("DML SUPPORT: Writeback Scale Ratio And Taps : %s\n", mode_lib->vba.WritebackScaleRatioAndTapsSupport ? "Supported" : "NOT Supported");
+ dml_print("DML SUPPORT: Cursor : %s\n", mode_lib->vba.CursorSupport ? "Supported" : "NOT Supported");
+ dml_print("DML SUPPORT: Pitch : %s\n", mode_lib->vba.PitchSupport ? "Supported" : "NOT Supported");
+ dml_print("DML SUPPORT: Prefetch : [%s, %s]\n", mode_lib->vba.PrefetchSupported[i][0] ? "Supported" : "NOT Supported", mode_lib->vba.PrefetchSupported[i][1] ? "Supported" : "NOT Supported");
+ dml_print("DML SUPPORT: Dynamic Metadata : [%s, %s]\n", mode_lib->vba.DynamicMetadataSupported[i][0] ? "Supported" : "NOT Supported", mode_lib->vba.DynamicMetadataSupported[i][1] ? "Supported" : "NOT Supported");
+ dml_print("DML SUPPORT: Total Vertical Active Bandwidth : [%s, %s]\n", mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][0] ? "Supported" : "NOT Supported", mode_lib->vba.TotalVerticalActiveBandwidthSupport[i][1] ? "Supported" : "NOT Supported");
+ dml_print("DML SUPPORT: VRatio In Prefetch : [%s, %s]\n", mode_lib->vba.VRatioInPrefetchSupported[i][0] ? "Supported" : "NOT Supported", mode_lib->vba.VRatioInPrefetchSupported[i][1] ? "Supported" : "NOT Supported");
+ dml_print("DML SUPPORT: PTE Buffer Size Not Exceeded : [%s, %s]\n", mode_lib->vba.PTEBufferSizeNotExceeded[i][0] ? "Supported" : "NOT Supported", mode_lib->vba.PTEBufferSizeNotExceeded[i][1] ? "Supported" : "NOT Supported");
+ dml_print("DML SUPPORT: DSC Input BPC : %s\n", mode_lib->vba.NonsupportedDSCInputBPC ? "Not Supported" : "Supported");
+ dml_print("DML SUPPORT: HostVMEnable : %d\n", mode_lib->vba.HostVMEnable);
+ dml_print("DML SUPPORT: ImmediateFlipSupportedForState : [%d, %d]\n", mode_lib->vba.ImmediateFlipSupportedForState[i][0], mode_lib->vba.ImmediateFlipSupportedForState[i][1]);
+ }
+}
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index c77c3d827e4a..6adee8a9ee56 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -37,6 +37,9 @@ enum dml_project {
DML_PROJECT_NAVI10,
DML_PROJECT_NAVI10v2,
DML_PROJECT_DCN21,
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ DML_PROJECT_DCN30,
+#endif
};
struct display_mode_lib;
@@ -78,4 +81,12 @@ void dml_init_instance(struct display_mode_lib *lib,
const char *dml_get_status_message(enum dm_validation_status status);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+void dml_log_pipe_params(
+ struct display_mode_lib *mode_lib,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt);
+
+void dml_log_mode_support_params(struct display_mode_lib *mode_lib);
+#endif // CONFIG_DRM_AMD_DC_DCN3_0
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 439ffd04be34..0fac7f754604 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -82,6 +82,7 @@ struct _vcs_dpi_soc_bounding_box_st {
double pct_ideal_dram_sdp_bw_after_urgent_pixel_only; // PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly
double pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm;
double pct_ideal_dram_sdp_bw_after_urgent_vm_only;
+ double pct_ideal_sdp_bw_after_urgent;
double max_avg_sdp_bw_use_normal_percent;
double max_avg_dram_bw_use_normal_percent;
unsigned int max_request_size_bytes;
@@ -123,8 +124,12 @@ struct _vcs_dpi_soc_bounding_box_st {
struct _vcs_dpi_ip_params_st {
bool use_min_dcfclk;
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ bool clamp_min_dcfclk;
+#endif
bool gpuvm_enable;
bool hostvm_enable;
+ bool dsc422_native_support;
unsigned int gpuvm_max_page_table_levels;
unsigned int hostvm_max_page_table_levels;
unsigned int hostvm_cached_page_table_levels;
@@ -143,6 +148,7 @@ struct _vcs_dpi_ip_params_st {
unsigned char pte_enable;
unsigned int pte_chunk_size_kbytes;
unsigned int meta_chunk_size_kbytes;
+ unsigned int min_meta_chunk_size_bytes;
unsigned int writeback_chunk_size_kbytes;
unsigned int line_buffer_size_bits;
unsigned int max_line_buffer_lines;
@@ -158,6 +164,7 @@ struct _vcs_dpi_ip_params_st {
double writeback_max_vscl_ratio;
double writeback_min_hscl_ratio;
double writeback_min_vscl_ratio;
+ double maximum_dsc_bits_per_component;
unsigned int writeback_max_hscl_taps;
unsigned int writeback_max_vscl_taps;
unsigned int writeback_line_buffer_luma_buffer_size;
@@ -219,11 +226,14 @@ struct _vcs_dpi_display_xfc_params_st {
struct _vcs_dpi_display_pipe_source_params_st {
int source_format;
+ double dcc_fraction_of_zs_req_luma;
+ double dcc_fraction_of_zs_req_chroma;
unsigned char dcc;
unsigned int dcc_rate;
unsigned int dcc_rate_chroma;
unsigned char dcc_use_global;
unsigned char vm;
+ bool unbounded_req_mode;
bool gpuvm; // gpuvm enabled
bool hostvm; // hostvm enabled
bool gpuvm_levels_force_en;
@@ -324,6 +334,8 @@ struct _vcs_dpi_display_pipe_dest_params_st {
unsigned int vblank_end;
unsigned int htotal;
unsigned int vtotal;
+ unsigned int refresh_rate;
+ unsigned int vfront_porch;
unsigned int vactive;
unsigned int hactive;
unsigned int vstartup_start;
@@ -333,6 +345,7 @@ struct _vcs_dpi_display_pipe_dest_params_st {
unsigned char interlaced;
double pixel_rate_mhz;
unsigned char synchronized_vblank_all_planes;
+ unsigned char synchronize_timing_if_single_refresh_rate;
unsigned char otg_inst;
unsigned int odm_combine;
unsigned char use_maximum_vstartup;
@@ -469,6 +482,7 @@ struct _vcs_dpi_display_dlg_regs_st {
unsigned int refcyc_per_vm_req_vblank;
unsigned int refcyc_per_vm_req_flip;
unsigned int refcyc_per_vm_dmdata;
+ unsigned int dmdata_dl_delta;
};
struct _vcs_dpi_display_ttu_regs_st {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index b19988f54721..7916a7ea9336 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -91,15 +91,13 @@ dml_get_attr_func(wm_stutter_exit, mode_lib->vba.StutterExitWatermark);
dml_get_attr_func(wm_stutter_enter_exit, mode_lib->vba.StutterEnterPlusExitWatermark);
dml_get_attr_func(wm_dram_clock_change, mode_lib->vba.DRAMClockChangeWatermark);
dml_get_attr_func(wm_writeback_dram_clock_change, mode_lib->vba.WritebackDRAMClockChangeWatermark);
-dml_get_attr_func(wm_xfc_underflow, mode_lib->vba.UrgentWatermark); // xfc_underflow maps to urgent
dml_get_attr_func(stutter_efficiency, mode_lib->vba.StutterEfficiency);
dml_get_attr_func(stutter_efficiency_no_vblank, mode_lib->vba.StutterEfficiencyNotIncludingVBlank);
+dml_get_attr_func(stutter_period, mode_lib->vba.StutterPeriod);
dml_get_attr_func(urgent_latency, mode_lib->vba.UrgentLatency);
dml_get_attr_func(urgent_extra_latency, mode_lib->vba.UrgentExtraLatency);
dml_get_attr_func(nonurgent_latency, mode_lib->vba.NonUrgentLatencyTolerance);
-dml_get_attr_func(
- dram_clock_change_latency,
- mode_lib->vba.MinActiveDRAMClockChangeLatencySupported);
+dml_get_attr_func(dram_clock_change_latency, mode_lib->vba.MinActiveDRAMClockChangeLatencySupported);
dml_get_attr_func(dispclk_calculated, mode_lib->vba.DISPCLK_calculated);
dml_get_attr_func(total_data_read_bw, mode_lib->vba.TotalDataReadBandwidth);
dml_get_attr_func(return_bw, mode_lib->vba.ReturnBW);
@@ -119,6 +117,7 @@ dml_get_pipe_attr_func(dsc_delay, mode_lib->vba.DSCDelay);
dml_get_pipe_attr_func(dppclk_calculated, mode_lib->vba.DPPCLK_calculated);
dml_get_pipe_attr_func(dscclk_calculated, mode_lib->vba.DSCCLK_calculated);
dml_get_pipe_attr_func(min_ttu_vblank, mode_lib->vba.MinTTUVBlank);
+dml_get_pipe_attr_func(min_ttu_vblank_in_us, mode_lib->vba.MinTTUVBlank);
dml_get_pipe_attr_func(vratio_prefetch_l, mode_lib->vba.VRatioPrefetchY);
dml_get_pipe_attr_func(vratio_prefetch_c, mode_lib->vba.VRatioPrefetchC);
dml_get_pipe_attr_func(dst_x_after_scaler, mode_lib->vba.DSTXAfterScaler);
@@ -127,18 +126,37 @@ dml_get_pipe_attr_func(dst_y_per_vm_vblank, mode_lib->vba.DestinationLinesToRequ
dml_get_pipe_attr_func(dst_y_per_row_vblank, mode_lib->vba.DestinationLinesToRequestRowInVBlank);
dml_get_pipe_attr_func(dst_y_prefetch, mode_lib->vba.DestinationLinesForPrefetch);
dml_get_pipe_attr_func(dst_y_per_vm_flip, mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip);
-dml_get_pipe_attr_func(
- dst_y_per_row_flip,
- mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip);
-
-dml_get_pipe_attr_func(xfc_transfer_delay, mode_lib->vba.XFCTransferDelay);
-dml_get_pipe_attr_func(xfc_precharge_delay, mode_lib->vba.XFCPrechargeDelay);
-dml_get_pipe_attr_func(xfc_remote_surface_flip_latency, mode_lib->vba.XFCRemoteSurfaceFlipLatency);
-dml_get_pipe_attr_func(xfc_prefetch_margin, mode_lib->vba.XFCPrefetchMargin);
+dml_get_pipe_attr_func(dst_y_per_row_flip, mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip);
dml_get_pipe_attr_func(refcyc_per_vm_group_vblank, mode_lib->vba.TimePerVMGroupVBlank);
dml_get_pipe_attr_func(refcyc_per_vm_group_flip, mode_lib->vba.TimePerVMGroupFlip);
dml_get_pipe_attr_func(refcyc_per_vm_req_vblank, mode_lib->vba.TimePerVMRequestVBlank);
dml_get_pipe_attr_func(refcyc_per_vm_req_flip, mode_lib->vba.TimePerVMRequestFlip);
+dml_get_pipe_attr_func(refcyc_per_vm_group_vblank_in_us, mode_lib->vba.TimePerVMGroupVBlank);
+dml_get_pipe_attr_func(refcyc_per_vm_group_flip_in_us, mode_lib->vba.TimePerVMGroupFlip);
+dml_get_pipe_attr_func(refcyc_per_vm_req_vblank_in_us, mode_lib->vba.TimePerVMRequestVBlank);
+dml_get_pipe_attr_func(refcyc_per_vm_req_flip_in_us, mode_lib->vba.TimePerVMRequestFlip);
+dml_get_pipe_attr_func(refcyc_per_vm_dmdata_in_us, mode_lib->vba.Tdmdl_vm);
+dml_get_pipe_attr_func(dmdata_dl_delta_in_us, mode_lib->vba.Tdmdl);
+dml_get_pipe_attr_func(refcyc_per_line_delivery_l_in_us, mode_lib->vba.DisplayPipeLineDeliveryTimeLuma);
+dml_get_pipe_attr_func(refcyc_per_line_delivery_c_in_us, mode_lib->vba.DisplayPipeLineDeliveryTimeChroma);
+dml_get_pipe_attr_func(refcyc_per_line_delivery_pre_l_in_us, mode_lib->vba.DisplayPipeLineDeliveryTimeLumaPrefetch);
+dml_get_pipe_attr_func(refcyc_per_line_delivery_pre_c_in_us, mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch);
+dml_get_pipe_attr_func(refcyc_per_req_delivery_l_in_us, mode_lib->vba.DisplayPipeRequestDeliveryTimeLuma);
+dml_get_pipe_attr_func(refcyc_per_req_delivery_c_in_us, mode_lib->vba.DisplayPipeRequestDeliveryTimeChroma);
+dml_get_pipe_attr_func(refcyc_per_req_delivery_pre_l_in_us, mode_lib->vba.DisplayPipeRequestDeliveryTimeLumaPrefetch);
+dml_get_pipe_attr_func(refcyc_per_req_delivery_pre_c_in_us, mode_lib->vba.DisplayPipeRequestDeliveryTimeChromaPrefetch);
+dml_get_pipe_attr_func(refcyc_per_cursor_req_delivery_in_us, mode_lib->vba.CursorRequestDeliveryTime);
+dml_get_pipe_attr_func(refcyc_per_cursor_req_delivery_pre_in_us, mode_lib->vba.CursorRequestDeliveryTimePrefetch);
+dml_get_pipe_attr_func(refcyc_per_meta_chunk_nom_l_in_us, mode_lib->vba.TimePerMetaChunkNominal);
+dml_get_pipe_attr_func(refcyc_per_meta_chunk_nom_c_in_us, mode_lib->vba.TimePerChromaMetaChunkNominal);
+dml_get_pipe_attr_func(refcyc_per_meta_chunk_vblank_l_in_us, mode_lib->vba.TimePerMetaChunkVBlank);
+dml_get_pipe_attr_func(refcyc_per_meta_chunk_vblank_c_in_us, mode_lib->vba.TimePerChromaMetaChunkVBlank);
+dml_get_pipe_attr_func(refcyc_per_meta_chunk_flip_l_in_us, mode_lib->vba.TimePerMetaChunkFlip);
+dml_get_pipe_attr_func(refcyc_per_meta_chunk_flip_c_in_us, mode_lib->vba.TimePerChromaMetaChunkFlip);
+
+dml_get_pipe_attr_func(vupdate_offset, mode_lib->vba.VUpdateOffsetPix);
+dml_get_pipe_attr_func(vupdate_width, mode_lib->vba.VUpdateWidthPix);
+dml_get_pipe_attr_func(vready_offset, mode_lib->vba.VReadyOffsetPix);
unsigned int get_vstartup_calculated(
struct display_mode_lib *mode_lib,
@@ -282,6 +300,9 @@ static void fetch_ip_params(struct display_mode_lib *mode_lib)
// IP Parameters
mode_lib->vba.UseMinimumRequiredDCFCLK = ip->use_min_dcfclk;
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ mode_lib->vba.ClampMinDCFCLK = ip->clamp_min_dcfclk;
+#endif
mode_lib->vba.MaxNumDPP = ip->max_num_dpp;
mode_lib->vba.MaxNumOTG = ip->max_num_otg;
mode_lib->vba.MaxNumHDMIFRLOutputs = ip->max_num_hdmi_frl_outputs;
@@ -293,8 +314,10 @@ static void fetch_ip_params(struct display_mode_lib *mode_lib)
mode_lib->vba.MaxPSCLToLBThroughput = ip->max_pscl_lb_bw_pix_per_clk;
mode_lib->vba.ROBBufferSizeInKByte = ip->rob_buffer_size_kbytes;
mode_lib->vba.DETBufferSizeInKByte = ip->det_buffer_size_kbytes;
+
mode_lib->vba.PixelChunkSizeInKByte = ip->pixel_chunk_size_kbytes;
mode_lib->vba.MetaChunkSize = ip->meta_chunk_size_kbytes;
+ mode_lib->vba.MinMetaChunkSizeBytes = ip->min_meta_chunk_size_bytes;
mode_lib->vba.WritebackChunkSize = ip->writeback_chunk_size_kbytes;
mode_lib->vba.LineBufferSize = ip->line_buffer_size_bits;
mode_lib->vba.MaxLineBufferLines = ip->max_line_buffer_lines;
@@ -365,6 +388,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
visited[k] = false;
mode_lib->vba.NumberOfActivePlanes = 0;
+ mode_lib->vba.ImmediateFlipSupport = false;
+ mode_lib->vba.ImmediateFlipRequirement = dm_immediate_flip_not_required;
for (j = 0; j < mode_lib->vba.cache_num_pipes; ++j) {
display_pipe_source_params_st *src = &pipes[j].pipe.src;
display_pipe_dest_params_st *dst = &pipes[j].pipe.dest;
@@ -425,9 +450,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
/* TODO: Needs to be set based on src->dcc_rate_luma/chroma */
mode_lib->vba.DCCRateLuma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate;
mode_lib->vba.DCCRateChroma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate_chroma;
-
- mode_lib->vba.SourcePixelFormat[mode_lib->vba.NumberOfActivePlanes] =
- (enum source_format_class) (src->source_format);
+ mode_lib->vba.SourcePixelFormat[mode_lib->vba.NumberOfActivePlanes] = (enum source_format_class) (src->source_format);
mode_lib->vba.HActive[mode_lib->vba.NumberOfActivePlanes] = dst->hactive;
mode_lib->vba.VActive[mode_lib->vba.NumberOfActivePlanes] = dst->vactive;
mode_lib->vba.SurfaceTiling[mode_lib->vba.NumberOfActivePlanes] =
@@ -600,14 +623,14 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] +=
src_k->viewport_width;
mode_lib->vba.ViewportWidthChroma[mode_lib->vba.NumberOfActivePlanes] +=
- src_k->viewport_width;
+ src_k->viewport_width_c;
mode_lib->vba.ScalerRecoutWidth[mode_lib->vba.NumberOfActivePlanes] +=
dst_k->recout_width;
} else {
mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] +=
src_k->viewport_height;
mode_lib->vba.ViewportHeightChroma[mode_lib->vba.NumberOfActivePlanes] +=
- src_k->viewport_height;
+ src_k->viewport_height_c;
}
mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] +=
dout_k->dsc_slices;
@@ -617,8 +640,10 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
}
}
- if (pipes[k].pipe.src.immediate_flip)
+ if (pipes[k].pipe.src.immediate_flip) {
mode_lib->vba.ImmediateFlipSupport = true;
+ mode_lib->vba.ImmediateFlipRequirement = dm_immediate_flip_required;
+ }
mode_lib->vba.NumberOfActivePlanes++;
}
@@ -648,10 +673,12 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
// TODO: ODMCombineEnabled => 2 * DPPPerPlane...actually maybe not since all pipes are specified
// Do we want the dscclk to automatically be halved? Guess not since the value is specified
-
+ mode_lib->vba.SynchronizeTimingsIfSingleRefreshRate = pipes[0].pipe.dest.synchronize_timing_if_single_refresh_rate;
mode_lib->vba.SynchronizedVBlank = pipes[0].pipe.dest.synchronized_vblank_all_planes;
- for (k = 1; k < mode_lib->vba.cache_num_pipes; ++k)
+ for (k = 1; k < mode_lib->vba.cache_num_pipes; ++k) {
+ ASSERT(mode_lib->vba.SynchronizeTimingsIfSingleRefreshRate == pipes[k].pipe.dest.synchronize_timing_if_single_refresh_rate);
ASSERT(mode_lib->vba.SynchronizedVBlank == pipes[k].pipe.dest.synchronized_vblank_all_planes);
+ }
mode_lib->vba.GPUVMEnable = false;
mode_lib->vba.HostVMEnable = false;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 3f559e725ab1..f615815c73bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -41,9 +41,9 @@ dml_get_attr_decl(wm_stutter_exit);
dml_get_attr_decl(wm_stutter_enter_exit);
dml_get_attr_decl(wm_dram_clock_change);
dml_get_attr_decl(wm_writeback_dram_clock_change);
-dml_get_attr_decl(wm_xfc_underflow);
dml_get_attr_decl(stutter_efficiency_no_vblank);
dml_get_attr_decl(stutter_efficiency);
+dml_get_attr_decl(stutter_period);
dml_get_attr_decl(urgent_latency);
dml_get_attr_decl(urgent_extra_latency);
dml_get_attr_decl(nonurgent_latency);
@@ -61,6 +61,7 @@ dml_get_pipe_attr_decl(dsc_delay);
dml_get_pipe_attr_decl(dppclk_calculated);
dml_get_pipe_attr_decl(dscclk_calculated);
dml_get_pipe_attr_decl(min_ttu_vblank);
+dml_get_pipe_attr_decl(min_ttu_vblank_in_us);
dml_get_pipe_attr_decl(vratio_prefetch_l);
dml_get_pipe_attr_decl(vratio_prefetch_c);
dml_get_pipe_attr_decl(dst_x_after_scaler);
@@ -70,14 +71,36 @@ dml_get_pipe_attr_decl(dst_y_per_row_vblank);
dml_get_pipe_attr_decl(dst_y_prefetch);
dml_get_pipe_attr_decl(dst_y_per_vm_flip);
dml_get_pipe_attr_decl(dst_y_per_row_flip);
-dml_get_pipe_attr_decl(xfc_transfer_delay);
-dml_get_pipe_attr_decl(xfc_precharge_delay);
-dml_get_pipe_attr_decl(xfc_remote_surface_flip_latency);
-dml_get_pipe_attr_decl(xfc_prefetch_margin);
dml_get_pipe_attr_decl(refcyc_per_vm_group_vblank);
dml_get_pipe_attr_decl(refcyc_per_vm_group_flip);
dml_get_pipe_attr_decl(refcyc_per_vm_req_vblank);
dml_get_pipe_attr_decl(refcyc_per_vm_req_flip);
+dml_get_pipe_attr_decl(refcyc_per_vm_group_vblank_in_us);
+dml_get_pipe_attr_decl(refcyc_per_vm_group_flip_in_us);
+dml_get_pipe_attr_decl(refcyc_per_vm_req_vblank_in_us);
+dml_get_pipe_attr_decl(refcyc_per_vm_req_flip_in_us);
+dml_get_pipe_attr_decl(refcyc_per_vm_dmdata_in_us);
+dml_get_pipe_attr_decl(dmdata_dl_delta_in_us);
+dml_get_pipe_attr_decl(refcyc_per_line_delivery_l_in_us);
+dml_get_pipe_attr_decl(refcyc_per_line_delivery_c_in_us);
+dml_get_pipe_attr_decl(refcyc_per_line_delivery_pre_l_in_us);
+dml_get_pipe_attr_decl(refcyc_per_line_delivery_pre_c_in_us);
+dml_get_pipe_attr_decl(refcyc_per_req_delivery_l_in_us);
+dml_get_pipe_attr_decl(refcyc_per_req_delivery_c_in_us);
+dml_get_pipe_attr_decl(refcyc_per_req_delivery_pre_l_in_us);
+dml_get_pipe_attr_decl(refcyc_per_req_delivery_pre_c_in_us);
+dml_get_pipe_attr_decl(refcyc_per_cursor_req_delivery_in_us);
+dml_get_pipe_attr_decl(refcyc_per_cursor_req_delivery_pre_in_us);
+dml_get_pipe_attr_decl(refcyc_per_meta_chunk_nom_l_in_us);
+dml_get_pipe_attr_decl(refcyc_per_meta_chunk_nom_c_in_us);
+dml_get_pipe_attr_decl(refcyc_per_meta_chunk_vblank_l_in_us);
+dml_get_pipe_attr_decl(refcyc_per_meta_chunk_vblank_c_in_us);
+dml_get_pipe_attr_decl(refcyc_per_meta_chunk_flip_l_in_us);
+dml_get_pipe_attr_decl(refcyc_per_meta_chunk_flip_c_in_us);
+
+dml_get_pipe_attr_decl(vupdate_offset);
+dml_get_pipe_attr_decl(vupdate_width);
+dml_get_pipe_attr_decl(vready_offset);
unsigned int get_vstartup_calculated(
struct display_mode_lib *mode_lib,
@@ -229,8 +252,7 @@ struct vba_vars_st {
unsigned int OverrideGPUVMPageTableLevels;
unsigned int OverrideHostVMPageTableLevels;
unsigned int MetaChunkSize;
- double MinPixelChunkSizeBytes;
- double MinMetaChunkSizeBytes;
+ unsigned int MinMetaChunkSizeBytes;
unsigned int WritebackChunkSize;
bool ODMCapability;
unsigned int NumberOfDSC;
@@ -344,8 +366,8 @@ struct vba_vars_st {
unsigned int EffectiveLBLatencyHidingSourceLinesLuma;
unsigned int EffectiveLBLatencyHidingSourceLinesChroma;
double BandwidthAvailableForImmediateFlip;
- unsigned int PrefetchMode[DC__VOLTAGE_STATES + 1][2];
- unsigned int PrefetchModePerState[DC__VOLTAGE_STATES + 1][2];
+ unsigned int PrefetchMode[DC__VOLTAGE_STATES][2];
+ unsigned int PrefetchModePerState[DC__VOLTAGE_STATES][2];
unsigned int MinPrefetchMode;
unsigned int MaxPrefetchMode;
bool AnyLinesForVMOrRowTooLarge;
@@ -393,16 +415,16 @@ struct vba_vars_st {
unsigned int MaxNumWriteback;
bool WritebackLumaAndChromaScalingSupported;
bool Cursor64BppSupport;
- double DCFCLKPerState[DC__VOLTAGE_STATES + 1];
- double DCFCLKState[DC__VOLTAGE_STATES + 1][2];
- double FabricClockPerState[DC__VOLTAGE_STATES + 1];
- double SOCCLKPerState[DC__VOLTAGE_STATES + 1];
- double PHYCLKPerState[DC__VOLTAGE_STATES + 1];
- double DTBCLKPerState[DC__VOLTAGE_STATES + 1];
- double MaxDppclk[DC__VOLTAGE_STATES + 1];
- double MaxDSCCLK[DC__VOLTAGE_STATES + 1];
- double DRAMSpeedPerState[DC__VOLTAGE_STATES + 1];
- double MaxDispclk[DC__VOLTAGE_STATES + 1];
+ double DCFCLKPerState[DC__VOLTAGE_STATES];
+ double DCFCLKState[DC__VOLTAGE_STATES][2];
+ double FabricClockPerState[DC__VOLTAGE_STATES];
+ double SOCCLKPerState[DC__VOLTAGE_STATES];
+ double PHYCLKPerState[DC__VOLTAGE_STATES];
+ double DTBCLKPerState[DC__VOLTAGE_STATES];
+ double MaxDppclk[DC__VOLTAGE_STATES];
+ double MaxDSCCLK[DC__VOLTAGE_STATES];
+ double DRAMSpeedPerState[DC__VOLTAGE_STATES];
+ double MaxDispclk[DC__VOLTAGE_STATES];
int VoltageOverrideLevel;
/*outputs*/
@@ -413,11 +435,11 @@ struct vba_vars_st {
bool WritebackLatencySupport;
bool WritebackModeSupport;
bool Writeback10bpc420Supported;
- bool BandwidthSupport[DC__VOLTAGE_STATES + 1];
+ bool BandwidthSupport[DC__VOLTAGE_STATES];
unsigned int TotalNumberOfActiveWriteback;
double CriticalPoint;
double ReturnBWToDCNPerState;
- bool IsErrorResult[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ bool IsErrorResult[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
bool prefetch_vm_bw_valid;
bool prefetch_row_bw_valid;
bool NumberOfOTGSupport;
@@ -425,7 +447,7 @@ struct vba_vars_st {
bool WritebackScaleRatioAndTapsSupport;
bool CursorSupport;
bool PitchSupport;
- enum dm_validation_status ValidationStatus[DC__VOLTAGE_STATES + 1];
+ enum dm_validation_status ValidationStatus[DC__VOLTAGE_STATES];
double WritebackLineBufferLumaBufferSize;
double WritebackLineBufferChromaBufferSize;
@@ -443,7 +465,7 @@ struct vba_vars_st {
double OutputLinkDPLanes[DC__NUM_DPP__MAX];
double ForcedOutputLinkBPP[DC__NUM_DPP__MAX]; // Mode Support only
double ImmediateFlipBW[DC__NUM_DPP__MAX];
- double MaxMaxVStartup[DC__VOLTAGE_STATES + 1][2];
+ double MaxMaxVStartup[DC__VOLTAGE_STATES][2];
double WritebackLumaVExtra;
double WritebackChromaVExtra;
@@ -470,7 +492,7 @@ struct vba_vars_st {
double RoundedUpMaxSwathSizeBytesC;
double EffectiveDETLBLinesLuma;
double EffectiveDETLBLinesChroma;
- double ProjectedDCFCLKDeepSleep[DC__VOLTAGE_STATES + 1][2];
+ double ProjectedDCFCLKDeepSleep[DC__VOLTAGE_STATES][2];
double PDEAndMetaPTEBytesPerFrameY;
double PDEAndMetaPTEBytesPerFrameC;
unsigned int MetaRowBytesY;
@@ -488,47 +510,47 @@ struct vba_vars_st {
double FractionOfUrgentBandwidthImmediateFlip; // Mode Support debugging output
/* ms locals */
- double IdealSDPPortBandwidthPerState[DC__VOLTAGE_STATES + 1][2];
- unsigned int NoOfDPP[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double IdealSDPPortBandwidthPerState[DC__VOLTAGE_STATES][2];
+ unsigned int NoOfDPP[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
int NoOfDPPThisState[DC__NUM_DPP__MAX];
- enum odm_combine_mode ODMCombineEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ enum odm_combine_mode ODMCombineEnablePerState[DC__VOLTAGE_STATES][DC__NUM_DPP__MAX];
double SwathWidthYThisState[DC__NUM_DPP__MAX];
- unsigned int SwathHeightCPerState[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ unsigned int SwathHeightCPerState[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
unsigned int SwathHeightYThisState[DC__NUM_DPP__MAX];
unsigned int SwathHeightCThisState[DC__NUM_DPP__MAX];
- double VRatioPreY[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- double VRatioPreC[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- double RequiredPrefetchPixelDataBWLuma[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- double RequiredPrefetchPixelDataBWChroma[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- double RequiredDPPCLK[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double VRatioPreY[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ double VRatioPreC[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ double RequiredPrefetchPixelDataBWLuma[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ double RequiredPrefetchPixelDataBWChroma[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ double RequiredDPPCLK[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
double RequiredDPPCLKThisState[DC__NUM_DPP__MAX];
- bool PTEBufferSizeNotExceededY[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- bool PTEBufferSizeNotExceededC[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- bool BandwidthWithoutPrefetchSupported[DC__VOLTAGE_STATES + 1][2];
- bool PrefetchSupported[DC__VOLTAGE_STATES + 1][2];
- bool VRatioInPrefetchSupported[DC__VOLTAGE_STATES + 1][2];
- double RequiredDISPCLK[DC__VOLTAGE_STATES + 1][2];
- bool DISPCLK_DPPCLK_Support[DC__VOLTAGE_STATES + 1][2];
- bool TotalAvailablePipesSupport[DC__VOLTAGE_STATES + 1][2];
- unsigned int TotalNumberOfActiveDPP[DC__VOLTAGE_STATES + 1][2];
- unsigned int TotalNumberOfDCCActiveDPP[DC__VOLTAGE_STATES + 1][2];
- bool ModeSupport[DC__VOLTAGE_STATES + 1][2];
- double ReturnBWPerState[DC__VOLTAGE_STATES + 1][2];
- bool DIOSupport[DC__VOLTAGE_STATES + 1];
- bool NotEnoughDSCUnits[DC__VOLTAGE_STATES + 1];
- bool DSCCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES + 1];
- bool DTBCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES + 1];
- double UrgentRoundTripAndOutOfOrderLatencyPerState[DC__VOLTAGE_STATES + 1];
- bool ROBSupport[DC__VOLTAGE_STATES + 1][2];
- bool PTEBufferSizeNotExceeded[DC__VOLTAGE_STATES + 1][2];
- bool TotalVerticalActiveBandwidthSupport[DC__VOLTAGE_STATES + 1][2];
- double MaxTotalVerticalActiveAvailableBandwidth[DC__VOLTAGE_STATES + 1][2];
+ bool PTEBufferSizeNotExceededY[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ bool PTEBufferSizeNotExceededC[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ bool BandwidthWithoutPrefetchSupported[DC__VOLTAGE_STATES][2];
+ bool PrefetchSupported[DC__VOLTAGE_STATES][2];
+ bool VRatioInPrefetchSupported[DC__VOLTAGE_STATES][2];
+ double RequiredDISPCLK[DC__VOLTAGE_STATES][2];
+ bool DISPCLK_DPPCLK_Support[DC__VOLTAGE_STATES][2];
+ bool TotalAvailablePipesSupport[DC__VOLTAGE_STATES][2];
+ unsigned int TotalNumberOfActiveDPP[DC__VOLTAGE_STATES][2];
+ unsigned int TotalNumberOfDCCActiveDPP[DC__VOLTAGE_STATES][2];
+ bool ModeSupport[DC__VOLTAGE_STATES][2];
+ double ReturnBWPerState[DC__VOLTAGE_STATES][2];
+ bool DIOSupport[DC__VOLTAGE_STATES];
+ bool NotEnoughDSCUnits[DC__VOLTAGE_STATES];
+ bool DSCCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES];
+ bool DTBCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES];
+ double UrgentRoundTripAndOutOfOrderLatencyPerState[DC__VOLTAGE_STATES];
+ bool ROBSupport[DC__VOLTAGE_STATES][2];
+ bool PTEBufferSizeNotExceeded[DC__VOLTAGE_STATES][2];
+ bool TotalVerticalActiveBandwidthSupport[DC__VOLTAGE_STATES][2];
+ double MaxTotalVerticalActiveAvailableBandwidth[DC__VOLTAGE_STATES][2];
double PrefetchBW[DC__NUM_DPP__MAX];
- double PDEAndMetaPTEBytesPerFrame[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- double MetaRowBytes[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- double DPTEBytesPerRow[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- double PrefetchLinesY[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- double PrefetchLinesC[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double PDEAndMetaPTEBytesPerFrame[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ double MetaRowBytes[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ double DPTEBytesPerRow[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ double PrefetchLinesY[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ double PrefetchLinesC[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
unsigned int MaxNumSwY[DC__NUM_DPP__MAX];
unsigned int MaxNumSwC[DC__NUM_DPP__MAX];
double PrefillY[DC__NUM_DPP__MAX];
@@ -540,12 +562,12 @@ struct vba_vars_st {
double SwathWidthYSingleDPP[DC__NUM_DPP__MAX];
double BytePerPixelInDETY[DC__NUM_DPP__MAX];
double BytePerPixelInDETC[DC__NUM_DPP__MAX];
- bool RequiresDSC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- unsigned int NumberOfDSCSlice[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- double RequiresFEC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- double OutputBppPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- double DSCDelayPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
- bool ViewportSizeSupport[DC__VOLTAGE_STATES + 1][2];
+ bool RequiresDSC[DC__VOLTAGE_STATES][DC__NUM_DPP__MAX];
+ unsigned int NumberOfDSCSlice[DC__VOLTAGE_STATES][DC__NUM_DPP__MAX];
+ double RequiresFEC[DC__VOLTAGE_STATES][DC__NUM_DPP__MAX];
+ double OutputBppPerState[DC__VOLTAGE_STATES][DC__NUM_DPP__MAX];
+ double DSCDelayPerState[DC__VOLTAGE_STATES][DC__NUM_DPP__MAX];
+ bool ViewportSizeSupport[DC__VOLTAGE_STATES][2];
unsigned int Read256BlockHeightY[DC__NUM_DPP__MAX];
unsigned int Read256BlockWidthY[DC__NUM_DPP__MAX];
unsigned int Read256BlockHeightC[DC__NUM_DPP__MAX];
@@ -560,7 +582,7 @@ struct vba_vars_st {
double WriteBandwidth[DC__NUM_DPP__MAX];
double PSCL_FACTOR[DC__NUM_DPP__MAX];
double PSCL_FACTOR_CHROMA[DC__NUM_DPP__MAX];
- double MaximumVStartup[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double MaximumVStartup[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];
unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];
double AlignedDCCMetaPitch[DC__NUM_DPP__MAX];
@@ -574,8 +596,8 @@ struct vba_vars_st {
double DestinationLinesToRequestVMInImmediateFlip[DC__NUM_DPP__MAX];
double DestinationLinesToRequestRowInImmediateFlip[DC__NUM_DPP__MAX];
double final_flip_bw[DC__NUM_DPP__MAX];
- bool ImmediateFlipSupportedForState[DC__VOLTAGE_STATES + 1][2];
- double WritebackDelay[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ bool ImmediateFlipSupportedForState[DC__VOLTAGE_STATES][2];
+ double WritebackDelay[DC__VOLTAGE_STATES][DC__NUM_DPP__MAX];
unsigned int vm_group_bytes[DC__NUM_DPP__MAX];
unsigned int dpte_group_bytes[DC__NUM_DPP__MAX];
unsigned int dpte_row_height[DC__NUM_DPP__MAX];
@@ -595,7 +617,7 @@ struct vba_vars_st {
double DisplayPipeLineDeliveryTimeChroma[DC__NUM_DPP__MAX]; // WM
double DisplayPipeRequestDeliveryTimeLuma[DC__NUM_DPP__MAX];
double DisplayPipeRequestDeliveryTimeChroma[DC__NUM_DPP__MAX];
- enum clock_change_support DRAMClockChangeSupport[DC__VOLTAGE_STATES + 1][2];
+ enum clock_change_support DRAMClockChangeSupport[DC__VOLTAGE_STATES][2];
double UrgentBurstFactorCursor[DC__NUM_DPP__MAX];
double UrgentBurstFactorCursorPre[DC__NUM_DPP__MAX];
double UrgentBurstFactorLuma[DC__NUM_DPP__MAX];
@@ -604,7 +626,7 @@ struct vba_vars_st {
double UrgentBurstFactorChromaPre[DC__NUM_DPP__MAX];
- bool MPCCombine[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ bool MPCCombine[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
double SwathWidthCSingleDPP[DC__NUM_DPP__MAX];
double MaximumSwathWidthInLineBufferLuma;
double MaximumSwathWidthInLineBufferChroma;
@@ -619,6 +641,7 @@ struct vba_vars_st {
double dummy6;
double dummy7[DC__NUM_DPP__MAX];
double dummy8[DC__NUM_DPP__MAX];
+ double dummy13[DC__NUM_DPP__MAX];
unsigned int dummyinteger1ms[DC__NUM_DPP__MAX];
double dummyinteger2ms[DC__NUM_DPP__MAX];
unsigned int dummyinteger3[DC__NUM_DPP__MAX];
@@ -631,6 +654,9 @@ struct vba_vars_st {
unsigned int dummyinteger10;
unsigned int dummyinteger11;
unsigned int dummyinteger12;
+ unsigned int dummyinteger30;
+ unsigned int dummyinteger31;
+ unsigned int dummyinteger32;
unsigned int dummyintegerarr1[DC__NUM_DPP__MAX];
unsigned int dummyintegerarr2[DC__NUM_DPP__MAX];
unsigned int dummyintegerarr3[DC__NUM_DPP__MAX];
@@ -639,9 +665,9 @@ struct vba_vars_st {
bool SingleDPPViewportSizeSupportPerPlane[DC__NUM_DPP__MAX];
double PlaneRequiredDISPCLKWithODMCombine2To1;
double PlaneRequiredDISPCLKWithODMCombine4To1;
- unsigned int TotalNumberOfSingleDPPPlanes[DC__VOLTAGE_STATES + 1][2];
+ unsigned int TotalNumberOfSingleDPPPlanes[DC__VOLTAGE_STATES][2];
bool LinkDSCEnable;
- bool ODMCombine4To1SupportCheckOK[DC__VOLTAGE_STATES + 1];
+ bool ODMCombine4To1SupportCheckOK[DC__VOLTAGE_STATES];
enum odm_combine_mode ODMCombineEnableThisState[DC__NUM_DPP__MAX];
double SwathWidthCThisState[DC__NUM_DPP__MAX];
bool ViewportSizeSupportPerPlane[DC__NUM_DPP__MAX];
@@ -765,6 +791,7 @@ struct vba_vars_st {
double FinalDRAMClockChangeLatency;
double Tdmdl_vm[DC__NUM_DPP__MAX];
double Tdmdl[DC__NUM_DPP__MAX];
+ double TSetup[DC__NUM_DPP__MAX];
unsigned int ThisVStartup;
bool WritebackAllowDRAMClockChangeEndPosition[DC__NUM_DPP__MAX];
double DST_Y_PER_META_ROW_NOM_C[DC__NUM_DPP__MAX];
@@ -785,12 +812,12 @@ struct vba_vars_st {
unsigned int ImmediateFlipBytes[DC__NUM_DPP__MAX];
unsigned int LinesInDETC[DC__NUM_DPP__MAX];
unsigned int LinesInDETCRoundedDownToSwath[DC__NUM_DPP__MAX];
- double UrgentLatencySupportUsPerState[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double UrgentLatencySupportUsPerState[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
double UrgentLatencySupportUs[DC__NUM_DPP__MAX];
- double FabricAndDRAMBandwidthPerState[DC__VOLTAGE_STATES + 1];
- bool UrgentLatencySupport[DC__VOLTAGE_STATES + 1][2];
- unsigned int SwathWidthYPerState[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- unsigned int SwathHeightYPerState[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
+ double FabricAndDRAMBandwidthPerState[DC__VOLTAGE_STATES];
+ bool UrgentLatencySupport[DC__VOLTAGE_STATES][2];
+ unsigned int SwathWidthYPerState[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ unsigned int SwathHeightYPerState[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
double qual_row_bw[DC__NUM_DPP__MAX];
double prefetch_row_bw[DC__NUM_DPP__MAX];
double prefetch_vm_bw[DC__NUM_DPP__MAX];
@@ -838,7 +865,7 @@ struct vba_vars_st {
double DCCRateLuma[DC__NUM_DPP__MAX];
double DCCRateChroma[DC__NUM_DPP__MAX];
- double PHYCLKD18PerState[DC__VOLTAGE_STATES + 1];
+ double PHYCLKD18PerState[DC__VOLTAGE_STATES];
bool WritebackSupportInterleaveAndUsingWholeBufferForASingleStream;
bool NumberOfHDMIFRLSupport;
@@ -847,7 +874,7 @@ struct vba_vars_st {
int AudioSampleLayout[DC__NUM_DPP__MAX];
int PercentMarginOverMinimumRequiredDCFCLK;
- bool DynamicMetadataSupported[DC__VOLTAGE_STATES + 1][2];
+ bool DynamicMetadataSupported[DC__VOLTAGE_STATES][2];
enum immediate_flip_requirement ImmediateFlipRequirement;
double DETBufferSizeYThisState[DC__NUM_DPP__MAX];
double DETBufferSizeCThisState[DC__NUM_DPP__MAX];
@@ -855,26 +882,26 @@ struct vba_vars_st {
bool NoUrgentLatencyHidingPre[DC__NUM_DPP__MAX];
int swath_width_luma_ub_this_state[DC__NUM_DPP__MAX];
int swath_width_chroma_ub_this_state[DC__NUM_DPP__MAX];
- double UrgLatency[DC__VOLTAGE_STATES + 1];
- double VActiveCursorBandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- double VActivePixelBandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- bool NoTimeForPrefetch[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- bool NoTimeForDynamicMetadata[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- double dpte_row_bandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- double meta_row_bandwidth[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- double DETBufferSizeYAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- double DETBufferSizeCAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- int swath_width_luma_ub_all_states[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- int swath_width_chroma_ub_all_states[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- bool NotUrgentLatencyHiding[DC__VOLTAGE_STATES + 1][2];
- unsigned int SwathHeightYAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- unsigned int SwathHeightCAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- unsigned int SwathWidthYAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- unsigned int SwathWidthCAllStates[DC__VOLTAGE_STATES + 1][2][DC__NUM_DPP__MAX];
- double TotalDPTERowBandwidth[DC__VOLTAGE_STATES + 1][2];
- double TotalMetaRowBandwidth[DC__VOLTAGE_STATES + 1][2];
- double TotalVActiveCursorBandwidth[DC__VOLTAGE_STATES + 1][2];
- double TotalVActivePixelBandwidth[DC__VOLTAGE_STATES + 1][2];
+ double UrgLatency[DC__VOLTAGE_STATES];
+ double VActiveCursorBandwidth[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ double VActivePixelBandwidth[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ bool NoTimeForPrefetch[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ bool NoTimeForDynamicMetadata[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ double dpte_row_bandwidth[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ double meta_row_bandwidth[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ double DETBufferSizeYAllStates[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ double DETBufferSizeCAllStates[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ int swath_width_luma_ub_all_states[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ int swath_width_chroma_ub_all_states[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ bool NotUrgentLatencyHiding[DC__VOLTAGE_STATES][2];
+ unsigned int SwathHeightYAllStates[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ unsigned int SwathHeightCAllStates[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ unsigned int SwathWidthYAllStates[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ unsigned int SwathWidthCAllStates[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
+ double TotalDPTERowBandwidth[DC__VOLTAGE_STATES][2];
+ double TotalMetaRowBandwidth[DC__VOLTAGE_STATES][2];
+ double TotalVActiveCursorBandwidth[DC__VOLTAGE_STATES][2];
+ double TotalVActivePixelBandwidth[DC__VOLTAGE_STATES][2];
double WritebackDelayTime[DC__NUM_DPP__MAX];
unsigned int DCCYIndependentBlock[DC__NUM_DPP__MAX];
unsigned int DCCCIndependentBlock[DC__NUM_DPP__MAX];
@@ -897,7 +924,12 @@ struct vba_vars_st {
double BPP;
enum odm_combine_policy ODMCombinePolicy;
bool UseMinimumRequiredDCFCLK;
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ bool ClampMinDCFCLK;
+#endif
bool AllowDramClockChangeOneDisplayVactive;
+ bool SynchronizeTimingsIfSingleRefreshRate;
+
};
bool CalculateMinAndMaxPrefetchMode(
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
index 02e06c9b3230..479d7d83220c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -86,9 +86,20 @@ static inline double dml_round(double a)
return floor;
}
+/* float
+static inline int dml_log2(float x)
+{
+ unsigned int ix = *((unsigned int *)&x);
+
+ return (int)((ix >> 23) & 0xff) - 127;
+}*/
+
+/* double */
static inline int dml_log2(double x)
{
- return dml_round((double)dcn_bw_log(x, 2));
+ unsigned long long ix = *((unsigned long long *)&x);
+
+ return (int)((ix >> 52) & 0x7ff) - 1023;
}
static inline double dml_pow(double a, int exp)
@@ -116,11 +127,6 @@ static inline double dml_floor_ex(double x, double granularity)
return (double) dcn_bw_floor2(x, granularity);
}
-static inline double dml_log(double x, double base)
-{
- return (double) dcn_bw_log(x, base);
-}
-
static inline unsigned int dml_round_to_multiple(unsigned int num,
unsigned int multiple,
unsigned char up)
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
index 667afbc260f9..4da21966ddce 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
@@ -175,7 +175,7 @@ static int median3(int a, int b, int c)
}
static void _do_calc_rc_params(struct rc_params *rc, enum colour_mode cm,
- enum bits_per_comp bpc, u8 drm_bpp,
+ enum bits_per_comp bpc, u16 drm_bpp,
bool is_navite_422_or_420,
int slice_width, int slice_height,
int minor_version)
@@ -265,7 +265,7 @@ static void _do_calc_rc_params(struct rc_params *rc, enum colour_mode cm,
rc->rc_buf_thresh[13] = 8064;
}
-static u32 _do_bytes_per_pixel_calc(int slice_width, u8 drm_bpp,
+static u32 _do_bytes_per_pixel_calc(int slice_width, u16 drm_bpp,
bool is_navite_422_or_420)
{
float bpp;
@@ -321,7 +321,7 @@ void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps)
enum colour_mode mode;
enum bits_per_comp bpc;
bool is_navite_422_or_420;
- u8 drm_bpp = pps->bits_per_pixel;
+ u16 drm_bpp = pps->bits_per_pixel;
int slice_width = pps->slice_width;
int slice_height = pps->slice_height;
@@ -357,7 +357,7 @@ u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps)
{
u32 ret;
- u8 drm_bpp = pps->bits_per_pixel;
+ u16 drm_bpp = pps->bits_per_pixel;
int slice_width = pps->slice_width;
bool is_navite_422_or_420 = pps->native_422 || pps->native_420;
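The drm_bpp widening above matters because struct drm_dsc_config's bits_per_pixel carries the target rate with 4 fractional bits (units of 1/16 bpp), so 16.0 bpp is already the raw value 256 and no longer fits in a u8. A small illustration of the truncation the u16 avoids (values are illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned short raw_bpp = 16 * 16;                /* 16.0 bpp in 1/16-bpp units   */
	unsigned char  as_u8   = (unsigned char)raw_bpp; /* what the old u8 field kept   */

	printf("raw=%u truncated=%u\n", raw_bpp, as_u8); /* prints raw=256 truncated=0   */
	return 0;
}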
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
index 202baa210cc8..0f2f4508e564 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
@@ -87,6 +87,16 @@ AMD_DAL_GPIO_DCN21 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn21/,$(GPIO_DCN21))
AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN21)
endif
###############################################################################
+# DCN 3
+###############################################################################
+ifdef CONFIG_DRM_AMD_DC_DCN3_0
+GPIO_DCN30 = hw_translate_dcn30.o hw_factory_dcn30.o
+
+AMD_DAL_GPIO_DCN30 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn30/,$(GPIO_DCN30))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN30)
+endif
+###############################################################################
# Diagnostics on FPGA
###############################################################################
GPIO_DIAG_FPGA = hw_translate_diag.o hw_factory_diag.o
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
new file mode 100644
index 000000000000..7e7fb6572107
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_factory.h"
+
+
+#include "../hw_gpio.h"
+#include "../hw_ddc.h"
+#include "../hw_hpd.h"
+#include "../hw_generic.h"
+
+#include "hw_factory_dcn30.h"
+
+
+#include "sienna_cichlid_ip_offset.h"
+#include "dcn/dcn_3_0_0_offset.h"
+#include "dcn/dcn_3_0_0_sh_mask.h"
+
+#include "nbio/nbio_7_4_offset.h"
+
+#include "dcn/dpcs_3_0_0_offset.h"
+#include "dcn/dpcs_3_0_0_sh_mask.h"
+
+#include "mmhub/mmhub_2_0_0_offset.h"
+#include "mmhub/mmhub_2_0_0_sh_mask.h"
+
+#include "reg_helper.h"
+#include "../hpd_regs.h"
+/* begin *********************
+ * macros to expand the register list macros defined in the HW object header file */
+
+/* DCN */
+#define block HPD
+#define reg_num 0
+
+#undef BASE_INNER
+#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
+
+#define BASE(seg) BASE_INNER(seg)
+
+
+
+#define REG(reg_name)\
+ BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
+
+#define SF_HPD(reg_name, field_name, post_fix)\
+ .field_name = HPD0_ ## reg_name ## __ ## field_name ## post_fix
+
+#define REGI(reg_name, block, id)\
+ BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#define SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+/* macros to expand the register list macros defined in the HW object header file
+ * end *********************/
+
+
+
+#define hpd_regs(id) \
+{\
+ HPD_REG_LIST(id)\
+}
+
+static const struct hpd_registers hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5),
+};
+
+static const struct hpd_sh_mask hpd_shift = {
+ HPD_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct hpd_sh_mask hpd_mask = {
+ HPD_MASK_SH_LIST(_MASK)
+};
+
+#include "../ddc_regs.h"
+
+ /* set field name */
+#define SF_DDC(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+static const struct ddc_registers ddc_data_regs_dcn[] = {
+ ddc_data_regs_dcn2(1),
+ ddc_data_regs_dcn2(2),
+ ddc_data_regs_dcn2(3),
+ ddc_data_regs_dcn2(4),
+ ddc_data_regs_dcn2(5),
+ ddc_data_regs_dcn2(6),
+};
+
+static const struct ddc_registers ddc_clk_regs_dcn[] = {
+ ddc_clk_regs_dcn2(1),
+ ddc_clk_regs_dcn2(2),
+ ddc_clk_regs_dcn2(3),
+ ddc_clk_regs_dcn2(4),
+ ddc_clk_regs_dcn2(5),
+ ddc_clk_regs_dcn2(6),
+};
+
+static const struct ddc_sh_mask ddc_shift[] = {
+ DDC_MASK_SH_LIST_DCN2(__SHIFT, 1),
+ DDC_MASK_SH_LIST_DCN2(__SHIFT, 2),
+ DDC_MASK_SH_LIST_DCN2(__SHIFT, 3),
+ DDC_MASK_SH_LIST_DCN2(__SHIFT, 4),
+ DDC_MASK_SH_LIST_DCN2(__SHIFT, 5),
+ DDC_MASK_SH_LIST_DCN2(__SHIFT, 6)
+};
+
+static const struct ddc_sh_mask ddc_mask[] = {
+ DDC_MASK_SH_LIST_DCN2(_MASK, 1),
+ DDC_MASK_SH_LIST_DCN2(_MASK, 2),
+ DDC_MASK_SH_LIST_DCN2(_MASK, 3),
+ DDC_MASK_SH_LIST_DCN2(_MASK, 4),
+ DDC_MASK_SH_LIST_DCN2(_MASK, 5),
+ DDC_MASK_SH_LIST_DCN2(_MASK, 6)
+};
+
+#include "../generic_regs.h"
+
+/* set field name */
+#define SF_GENERIC(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define generic_regs(id) \
+{\
+ GENERIC_REG_LIST(id)\
+}
+
+static const struct generic_registers generic_regs[] = {
+ generic_regs(A),
+ generic_regs(B),
+};
+
+static const struct generic_sh_mask generic_shift[] = {
+ GENERIC_MASK_SH_LIST(__SHIFT, A),
+ GENERIC_MASK_SH_LIST(__SHIFT, B),
+};
+
+static const struct generic_sh_mask generic_mask[] = {
+ GENERIC_MASK_SH_LIST(_MASK, A),
+ GENERIC_MASK_SH_LIST(_MASK, B),
+};
+
+static void define_generic_registers(struct hw_gpio_pin *pin, uint32_t en)
+{
+ struct hw_generic *generic = HW_GENERIC_FROM_BASE(pin);
+
+ generic->regs = &generic_regs[en];
+ generic->shifts = &generic_shift[en];
+ generic->masks = &generic_mask[en];
+ generic->base.regs = &generic_regs[en].gpio;
+}
+
+static void define_ddc_registers(
+ struct hw_gpio_pin *pin,
+ uint32_t en)
+{
+ struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
+
+ switch (pin->id) {
+ case GPIO_ID_DDC_DATA:
+ ddc->regs = &ddc_data_regs_dcn[en];
+ ddc->base.regs = &ddc_data_regs_dcn[en].gpio;
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ ddc->regs = &ddc_clk_regs_dcn[en];
+ ddc->base.regs = &ddc_clk_regs_dcn[en].gpio;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ ddc->shifts = &ddc_shift[en];
+ ddc->masks = &ddc_mask[en];
+
+}
+
+static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
+{
+ struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
+
+ hpd->regs = &hpd_regs[en];
+ hpd->shifts = &hpd_shift;
+ hpd->masks = &hpd_mask;
+ hpd->base.regs = &hpd_regs[en].gpio;
+}
+
+
+/* function table */
+static const struct hw_factory_funcs funcs = {
+ .init_ddc_data = dal_hw_ddc_init,
+ .init_generic = dal_hw_generic_init,
+ .init_hpd = dal_hw_hpd_init,
+ .get_ddc_pin = dal_hw_ddc_get_pin,
+ .get_hpd_pin = dal_hw_hpd_get_pin,
+ .get_generic_pin = dal_hw_generic_get_pin,
+ .define_hpd_registers = define_hpd_registers,
+ .define_ddc_registers = define_ddc_registers,
+ .define_generic_registers = define_generic_registers
+};
+/*
+ * dal_hw_factory_dcn30_init
+ *
+ * @brief
+ * Initialize HW factory function pointers and pin info
+ *
+ * @param
+ * struct hw_factory *factory - [out] struct of function pointers
+ */
+void dal_hw_factory_dcn30_init(struct hw_factory *factory)
+{
+ /*TODO check ASIC CAPs*/
+ factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
+ factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
+ factory->number_of_pins[GPIO_ID_GENERIC] = 4;
+ factory->number_of_pins[GPIO_ID_HPD] = 6;
+ factory->number_of_pins[GPIO_ID_GPIO_PAD] = 28;
+ factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
+ factory->number_of_pins[GPIO_ID_SYNC] = 0;
+ factory->number_of_pins[GPIO_ID_GSL] = 0;/*add this*/
+
+ factory->funcs = &funcs;
+}
+
+#endif
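The REG()/REGI() helpers in this new file build absolute register addresses entirely in the preprocessor: the register name is token-pasted onto the mm prefix and the _BASE_IDX suffix, and the two-level BASE()/BASE_INNER() indirection forces the _BASE_IDX macro to expand to its numeric value before it is pasted onto DCN_BASE__INST0_SEG. A self-contained sketch of that pattern follows; the base and offset values are made up, not the real Sienna Cichlid numbers:

#include <stdio.h>

/* stand-ins for the generated headers; values are illustrative only */
#define DCN_BASE__INST0_SEG2       0x000034C0
#define mmDC_GPIO_HPD_A            0x1234
#define mmDC_GPIO_HPD_A_BASE_IDX   2

#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg)       BASE_INNER(seg)      /* extra level forces 'seg' to expand first */
#define REG(reg_name) \
	(BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name)

int main(void)
{
	printf("0x%x\n", REG(DC_GPIO_HPD_A));    /* 0x34C0 + 0x1234 = 0x46F4 */
	return 0;
}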
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.h
new file mode 100644
index 000000000000..7ad15f5aa95f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#ifndef __DAL_HW_FACTORY_DCN30_H__
+#define __DAL_HW_FACTORY_DCN30_H__
+
+/* Initialize HW factory function pointers and pin info */
+void dal_hw_factory_dcn30_init(struct hw_factory *factory);
+
+#endif /* __DAL_HW_FACTORY_DCN30_H__ */
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.c
new file mode 100644
index 000000000000..c2f42e0248b3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#include "hw_translate_dcn30.h"
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_translate.h"
+
+
+#include "sienna_cichlid_ip_offset.h"
+#include "dcn/dcn_3_0_0_offset.h"
+#include "dcn/dcn_3_0_0_sh_mask.h"
+
+#include "nbio/nbio_7_4_offset.h"
+
+#include "dcn/dpcs_3_0_0_offset.h"
+#include "dcn/dpcs_3_0_0_sh_mask.h"
+
+#include "mmhub/mmhub_2_0_0_offset.h"
+#include "mmhub/mmhub_2_0_0_sh_mask.h"
+/* begin *********************
+ * macros to expand the register list macros defined in the HW object header file */
+
+/* DCN */
+#define block HPD
+#define reg_num 0
+
+#undef BASE_INNER
+#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
+
+#define BASE(seg) BASE_INNER(seg)
+
+#undef REG
+#define REG(reg_name)\
+ BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
+#define SF_HPD(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+
+/* macros to expand the register list macros defined in the HW object header file
+ * end *********************/
+
+
+static bool offset_to_id(
+ uint32_t offset,
+ uint32_t mask,
+ enum gpio_id *id,
+ uint32_t *en)
+{
+ switch (offset) {
+ /* GENERIC */
+ case REG(DC_GPIO_GENERIC_A):
+ *id = GPIO_ID_GENERIC;
+ switch (mask) {
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
+ *en = GPIO_GENERIC_A;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
+ *en = GPIO_GENERIC_B;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
+ *en = GPIO_GENERIC_C;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
+ *en = GPIO_GENERIC_D;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
+ *en = GPIO_GENERIC_E;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
+ *en = GPIO_GENERIC_F;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
+ *en = GPIO_GENERIC_G;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* HPD */
+ case REG(DC_GPIO_HPD_A):
+ *id = GPIO_ID_HPD;
+ switch (mask) {
+ case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
+ *en = GPIO_HPD_1;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
+ *en = GPIO_HPD_2;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
+ *en = GPIO_HPD_3;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
+ *en = GPIO_HPD_4;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
+ *en = GPIO_HPD_5;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
+ *en = GPIO_HPD_6;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* REG(DC_GPIO_GENLK_MASK */
+ case REG(DC_GPIO_GENLK_A):
+ *id = GPIO_ID_GSL;
+ switch (mask) {
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
+ *en = GPIO_GSL_GENLOCK_CLOCK;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
+ *en = GPIO_GSL_GENLOCK_VSYNC;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_A;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_B;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* DDC */
+ /* we don't care about the GPIO_ID for DDC
+ * in DdcHandle it will use GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
+ * directly in the create method */
+ case REG(DC_GPIO_DDC1_A):
+ *en = GPIO_DDC_LINE_DDC1;
+ return true;
+ case REG(DC_GPIO_DDC2_A):
+ *en = GPIO_DDC_LINE_DDC2;
+ return true;
+ case REG(DC_GPIO_DDC3_A):
+ *en = GPIO_DDC_LINE_DDC3;
+ return true;
+ case REG(DC_GPIO_DDC4_A):
+ *en = GPIO_DDC_LINE_DDC4;
+ return true;
+ case REG(DC_GPIO_DDC5_A):
+ *en = GPIO_DDC_LINE_DDC5;
+ return true;
+ case REG(DC_GPIO_DDC6_A):
+ *en = GPIO_DDC_LINE_DDC6;
+ return true;
+ case REG(DC_GPIO_DDCVGA_A):
+ *en = GPIO_DDC_LINE_DDC_VGA;
+ return true;
+
+// case REG(DC_GPIO_I2CPAD_A): does not exist
+// case REG(DC_GPIO_PWRSEQ_A):
+// case REG(DC_GPIO_PAD_STRENGTH_1):
+// case REG(DC_GPIO_PAD_STRENGTH_2):
+// case REG(DC_GPIO_DEBUG):
+ /* UNEXPECTED */
+ default:
+// case REG(DC_GPIO_SYNCA_A): not exist
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+}
+
+static bool id_to_offset(
+ enum gpio_id id,
+ uint32_t en,
+ struct gpio_pin_info *info)
+{
+ bool result = true;
+
+ switch (id) {
+ case GPIO_ID_DDC_DATA:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = REG(DC_GPIO_DDC1_A);
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = REG(DC_GPIO_DDC2_A);
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = REG(DC_GPIO_DDC3_A);
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = REG(DC_GPIO_DDC4_A);
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = REG(DC_GPIO_DDC5_A);
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = REG(DC_GPIO_DDC6_A);
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = REG(DC_GPIO_DDCVGA_A);
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = REG(DC_GPIO_DDC1_A);
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = REG(DC_GPIO_DDC2_A);
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = REG(DC_GPIO_DDC3_A);
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = REG(DC_GPIO_DDC4_A);
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = REG(DC_GPIO_DDC5_A);
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = REG(DC_GPIO_DDC6_A);
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = REG(DC_GPIO_DDCVGA_A);
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_GENERIC:
+ info->offset = REG(DC_GPIO_GENERIC_A);
+ switch (en) {
+ case GPIO_GENERIC_A:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
+ break;
+ case GPIO_GENERIC_B:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
+ break;
+ case GPIO_GENERIC_C:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
+ break;
+ case GPIO_GENERIC_D:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
+ break;
+ case GPIO_GENERIC_E:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
+ break;
+ case GPIO_GENERIC_F:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
+ break;
+ case GPIO_GENERIC_G:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_HPD:
+ info->offset = REG(DC_GPIO_HPD_A);
+ switch (en) {
+ case GPIO_HPD_1:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
+ break;
+ case GPIO_HPD_2:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
+ break;
+ case GPIO_HPD_3:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
+ break;
+ case GPIO_HPD_4:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
+ break;
+ case GPIO_HPD_5:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
+ break;
+ case GPIO_HPD_6:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_GSL:
+ switch (en) {
+ case GPIO_GSL_GENLOCK_CLOCK:
+ /* not implemented */
+ ASSERT_CRITICAL(false);
+ result = false;
+ break;
+ case GPIO_GSL_GENLOCK_VSYNC:
+ /* not implemented */
+ ASSERT_CRITICAL(false);
+ result = false;
+ break;
+ case GPIO_GSL_SWAPLOCK_A:
+ /* not implemented */
+ ASSERT_CRITICAL(false);
+ result = false;
+ break;
+ case GPIO_GSL_SWAPLOCK_B:
+ /* not implemented */
+ ASSERT_CRITICAL(false);
+ result = false;
+
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_SYNC:
+ case GPIO_ID_VIP_PAD:
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+
+ if (result) {
+ info->offset_y = info->offset + 2;
+ info->offset_en = info->offset + 1;
+ info->offset_mask = info->offset - 1;
+
+ info->mask_y = info->mask;
+ info->mask_en = info->mask;
+ info->mask_mask = info->mask;
+ }
+
+ return result;
+}
+
+/* function table */
+static const struct hw_translate_funcs funcs = {
+ .offset_to_id = offset_to_id,
+ .id_to_offset = id_to_offset,
+};
+
+/*
+ * dal_hw_translate_dcn30_init
+ *
+ * @brief
+ * Initialize Hw translate function pointers.
+ *
+ * @param
+ * struct hw_translate *tr - [out] struct of function pointers
+ *
+ */
+void dal_hw_translate_dcn30_init(struct hw_translate *tr)
+{
+ tr->funcs = &funcs;
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.h
new file mode 100644
index 000000000000..fe6c3f84aef9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#ifndef __DAL_HW_TRANSLATE_DCN30_H__
+#define __DAL_HW_TRANSLATE_DCN30_H__
+
+struct hw_translate;
+
+/* Initialize Hw translate function pointers */
+void dal_hw_translate_dcn30_init(struct hw_translate *tr);
+
+#endif /* __DAL_HW_TRANSLATE_DCN30_H__ */
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index f252af1947c3..e5cfe28bc7bf 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -50,6 +50,9 @@
#include "dcn20/hw_factory_dcn20.h"
#include "dcn21/hw_factory_dcn21.h"
#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#include "dcn30/hw_factory_dcn30.h"
+#endif
#include "diagnostics/hw_factory_diag.h"
@@ -99,7 +102,11 @@ bool dal_hw_factory_init(
dal_hw_factory_dcn21_init(factory);
return true;
#endif
-
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case DCN_VERSION_3_0:
+ dal_hw_factory_dcn30_init(factory);
+ return true;
+#endif
default:
ASSERT_CRITICAL(false);
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index 04e2c0f74cb0..efea7cb0f17c 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -48,6 +48,9 @@
#include "dcn20/hw_translate_dcn20.h"
#include "dcn21/hw_translate_dcn21.h"
#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#include "dcn30/hw_translate_dcn30.h"
+#endif
#include "diagnostics/hw_translate_diag.h"
@@ -94,6 +97,11 @@ bool dal_hw_translate_init(
dal_hw_translate_dcn21_init(translate);
return true;
#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case DCN_VERSION_3_0:
+ dal_hw_translate_dcn30_init(translate);
+ return true;
+#endif
default:
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
index 4ead89dd7c41..f932801235c6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -53,4 +53,6 @@ enum dc_status {
DC_ERROR_UNEXPECTED = -1
};
+char *dc_status_to_str(enum dc_status status);
+
#endif /* _CORE_STATUS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index c7fd702a4a87..5f985fcbedf1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -101,7 +101,11 @@ struct resource_funcs {
struct dc *dc,
struct dc_state *context,
bool fast_validate);
-
+ void (*calculate_wm)(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel);
int (*populate_dml_pipes)(
struct dc *dc,
struct dc_state *context,
@@ -147,6 +151,20 @@ struct resource_funcs {
void (*update_bw_bounding_box)(
struct dc *dc,
struct clk_bw_params *bw_params);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ bool (*acquire_post_bldn_3dlut)(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ int mpcc_id,
+ struct dc_3dlut **lut,
+ struct dc_transfer_func **shaper);
+
+ bool (*release_post_bldn_3dlut)(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct dc_3dlut **lut,
+ struct dc_transfer_func **shaper);
+#endif
};
@@ -189,6 +207,10 @@ struct resource_pool {
unsigned int underlay_pipe_index;
unsigned int stream_enc_count;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ struct dc_3dlut *mpc_lut[MAX_PIPES];
+ struct dc_transfer_func *mpc_shaper[MAX_PIPES];
+#endif
struct {
unsigned int xtalin_clock_inKhz;
unsigned int dccg_ref_clock_inKhz;
@@ -217,6 +239,10 @@ struct resource_pool {
struct dmcu *dmcu;
struct dmub_psr *psr;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ struct abm *multiple_abms[MAX_PIPES];
+#endif
+
const struct resource_funcs *funcs;
const struct resource_caps *res_cap;
@@ -312,6 +338,9 @@ struct resource_context {
uint8_t clock_source_ref_count[MAX_CLOCK_SOURCES];
uint8_t dp_clock_source_ref_count;
bool is_dsc_acquired[MAX_PIPES];
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ bool is_mpc_3dlut_acquired[MAX_PIPES];
+#endif
};
struct dce_bw_output {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index de2d160114db..b324e13f3f78 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -105,7 +105,7 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc,
bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *payload);
-uint32_t dc_link_aux_configure_timeout(struct ddc_service *ddc,
+bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc,
uint32_t timeout);
void dal_ddc_service_write_scdc_data(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index e94e5fbf2aa2..b970a32177af 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -28,7 +28,7 @@
#define LINK_TRAINING_ATTEMPTS 4
#define LINK_TRAINING_RETRY_DELAY 50 /* ms */
-#define LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD 3200 /*us*/
+#define LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD 3200 /*us*/
#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/
struct dc_link;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index ce65678c03b2..505357597603 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -43,6 +43,25 @@
#define DCN_MINIMUM_DISPCLK_Khz 100000
#define DCN_MINIMUM_DPPCLK_Khz 100000
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+struct dcn3_clk_internal {
+ int dummy;
+ /*TODO:
+ uint32_t CLK1_CLK0_CURRENT_CNT; //dispclk
+ uint32_t CLK1_CLK1_CURRENT_CNT; //dppclk
+ uint32_t CLK1_CLK2_CURRENT_CNT; //dprefclk
+ uint32_t CLK1_CLK3_CURRENT_CNT; //dcfclk
+ uint32_t CLK1_CLK3_DS_CNTL; //dcf_deep_sleep_divider
+ uint32_t CLK1_CLK3_ALLOW_DS; //dcf_deep_sleep_allow
+
+ uint32_t CLK1_CLK0_BYPASS_CNTL; //dispclk bypass
+ uint32_t CLK1_CLK1_BYPASS_CNTL; //dppclk bypass
+ uint32_t CLK1_CLK2_BYPASS_CNTL; //dprefclk bypass
+ uint32_t CLK1_CLK3_BYPASS_CNTL; //dcfclk bypass
+ */
+};
+
+#endif
/* Will these bw structures be ASIC specific? */
#define MAX_NUM_DPM_LVL 8
@@ -55,6 +74,12 @@ struct clk_limit_table_entry {
unsigned int fclk_mhz;
unsigned int memclk_mhz;
unsigned int socclk_mhz;
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ unsigned int dtbclk_mhz;
+ unsigned int dispclk_mhz;
+ unsigned int dppclk_mhz;
+ unsigned int phyclk_mhz;
+#endif
};
/* This table is contiguous */
@@ -72,6 +97,26 @@ struct wm_range_table_entry {
bool valid;
};
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+
+struct nv_wm_range_entry {
+ bool valid;
+
+ struct {
+ uint8_t wm_type;
+ uint16_t min_dcfclk;
+ uint16_t max_dcfclk;
+ uint16_t min_uclk;
+ uint16_t max_uclk;
+ } pmfw_breakdown;
+
+ struct {
+ double pstate_latency_us;
+ double sr_exit_time_us;
+ double sr_enter_plus_exit_time_us;
+ } dml_input;
+};
+#endif
struct clk_log_info {
bool enabled;
@@ -143,7 +188,19 @@ struct clk_bypass {
 * D occupied, C will be empty.
*/
struct wm_table {
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ union {
+ struct nv_wm_range_entry nv_entries[WM_SET_COUNT];
+#endif
struct wm_range_table_entry entries[WM_SET_COUNT];
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ };
+#endif
+};
+
+struct dummy_pstate_entry {
+ unsigned int dram_speed_mts;
+ unsigned int dummy_pstate_latency_us;
};
struct clk_bw_params {
@@ -151,6 +208,7 @@ struct clk_bw_params {
unsigned int num_channels;
struct clk_limit_table clk_table;
struct wm_table wm_table;
+ struct dummy_pstate_entry dummy_pstate_table[4];
};
/* Public interfaces */
@@ -183,6 +241,20 @@ struct clk_mgr_funcs {
bool (*are_clock_states_equal) (struct dc_clocks *a,
struct dc_clocks *b);
void (*notify_wm_ranges)(struct clk_mgr *clk_mgr);
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ /*
+ * Send message to PMFW to set hard min memclk frequency
+ * When current_mode = false, set DPM0
+ * When current_mode = true, set required clock for current mode
+ */
+ void (*set_hard_min_memclk)(struct clk_mgr *clk_mgr, bool current_mode);
+
+ /* Send message to PMFW to set hard max memclk frequency to highest DPM */
+ void (*set_hard_max_memclk)(struct clk_mgr *clk_mgr);
+
+ /* Get current memclk states from PMFW, update relevant structures */
+ void (*get_memclk_states_from_smu)(struct clk_mgr *clk_mgr);
+#endif
};
struct clk_mgr {
@@ -190,6 +262,9 @@ struct clk_mgr {
struct clk_mgr_funcs *funcs;
struct dc_clocks clks;
bool psr_allow_active_cache;
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ bool force_smu_not_present;
+#endif
 int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figure out where this goes
int dentist_vco_freq_khz;
struct clk_state_registers_and_bypass boot_snapshot;
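One construct in this file's changes worth noting: wm_table now wraps its entries[] array in an anonymous union with the DCN3-only nv_entries[] array, so both watermark layouts share the same storage; pre-DCN3 code keeps indexing entries[] while the DCN3 clock manager can fill nv_entries[]. A minimal sketch of that overlay pattern, with purely illustrative member names and sizes rather than the real structures:

#include <stdio.h>

#define WM_SET_COUNT 4

struct legacy_entry { unsigned int   wm_inst; unsigned int wm_type; };
struct nv_entry     { unsigned short min_dcfclk, max_dcfclk, min_uclk, max_uclk; };

struct wm_table_sketch {
	union {                                     /* anonymous union: both views alias */
		struct nv_entry     nv_entries[WM_SET_COUNT];
		struct legacy_entry entries[WM_SET_COUNT];
	};
};

int main(void)
{
	struct wm_table_sketch t = {0};

	t.nv_entries[0].min_dcfclk = 300;           /* DCN3-style path writes the new view */
	printf("table size: %zu bytes\n", sizeof(t)); /* one backing store for both views  */
	return 0;
}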
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 9311d0de377f..c3c151be7d03 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -101,6 +101,12 @@ enum dentist_divider_range {
CLK_SRI(CLK3_CLK_PLL_REQ, CLK3, 0), \
CLK_SRI(CLK3_CLK2_DFS_CNTL, CLK3, 0)
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+// TODO:
+#define CLK_REG_LIST_DCN3() \
+ SR(DENTIST_DISPCLK_CNTL)
+#endif
+
#define CLK_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -167,6 +173,10 @@ struct clk_mgr_registers {
uint32_t CLK3_CLK2_DFS_CNTL;
uint32_t CLK3_CLK_PLL_REQ;
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ uint32_t CLK0_CLK2_DFS_CNTL;
+ uint32_t CLK0_CLK_PLL_REQ;
+#endif
uint32_t MP1_SMN_C2PMSG_67;
uint32_t MP1_SMN_C2PMSG_83;
uint32_t MP1_SMN_C2PMSG_91;
@@ -260,6 +270,10 @@ struct clk_mgr_internal {
enum dm_pp_clocks_state max_clks_state;
enum dm_pp_clocks_state cur_min_clks_state;
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+
+ bool smu_present;
+#endif
};
struct clk_mgr_internal_funcs {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 47a566d82d6e..65f182c8bf14 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -31,6 +31,9 @@ enum dcc_control {
dcc_control__256_256_xxx,
dcc_control__128_128_xxx,
dcc_control__256_64_64,
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ dcc_control__256_128_128,
+#endif
};
enum segment_order {
@@ -62,6 +65,9 @@ enum dcn_hubbub_page_table_depth {
enum dcn_hubbub_page_table_block_size {
DCN_PAGE_TABLE_BLOCK_SIZE_4KB = 0,
DCN_PAGE_TABLE_BLOCK_SIZE_64KB = 4,
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ DCN_PAGE_TABLE_BLOCK_SIZE_32KB = 3
+#endif
};
struct dcn_hubbub_phys_addr_config {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 45ef390ae052..0491720c5fe9 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -121,6 +121,13 @@ struct CM_bias_params {
};
struct dpp_funcs {
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ bool (*dpp_program_gamcor_lut)(
+ struct dpp *dpp_base, const struct pwl_params *params);
+
+ void (*dpp_set_pre_degam)(struct dpp *dpp_base,
+ enum dc_transfer_func_predefined tr);
+#endif
void (*dpp_program_cm_dealpha)(struct dpp *dpp_base,
uint32_t enable, uint32_t additive_blending);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
index f30ab4916242..3407e5da5534 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
@@ -72,6 +72,67 @@ enum wbscl_coef_filter_type_sel {
};
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+enum dwb_boundary_mode {
+ DWBSCL_BOUNDARY_MODE_EDGE = 0,
+ DWBSCL_BOUNDARY_MODE_BLACK = 1
+};
+#endif
+
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+enum dwb_output_csc_mode {
+ DWB_OUTPUT_CSC_DISABLE = 0,
+ DWB_OUTPUT_CSC_COEF_A = 1,
+ DWB_OUTPUT_CSC_COEF_B = 2
+};
+
+enum dwb_ogam_lut_mode {
+ DWB_OGAM_MODE_BYPASS,
+ DWB_OGAM_RAMA_LUT,
+ DWB_OGAM_RAMB_LUT
+};
+
+enum dwb_color_volume {
+ DWB_SRGB_BT709 = 0, //SDR
+ DWB_PQ = 1, //HDR
+ DWB_HLG = 2, //HDR
+};
+
+enum dwb_color_space {
+ DWB_SRGB = 0, //SDR
+ DWB_BT709 = 1, //SDR
+ DWB_BT2020 = 2, //HDR
+};
+
+struct dwb_efc_hdr_metadata {
+ /*display chromaticities and white point in units of 0.00001 */
+ unsigned int chromaticity_green_x;
+ unsigned int chromaticity_green_y;
+ unsigned int chromaticity_blue_x;
+ unsigned int chromaticity_blue_y;
+ unsigned int chromaticity_red_x;
+ unsigned int chromaticity_red_y;
+ unsigned int chromaticity_white_point_x;
+ unsigned int chromaticity_white_point_y;
+
+ /*in units of candelas per square meter */
+ unsigned int min_luminance;
+ unsigned int max_luminance;
+
+ /*in units of nits */
+ unsigned int maximum_content_light_level;
+ unsigned int maximum_frame_average_light_level;
+};
+
+struct dwb_efc_display_settings {
+ unsigned int inputColorVolume;
+ unsigned int inputColorSpace;
+ unsigned int inputBitDepthMinus8;
+ struct dwb_efc_hdr_metadata hdr_metadata;
+ unsigned int dwbOutputBlack; // 0 - Normal, 1 - Output Black
+};
+
+#endif
struct dwb_warmup_params {
bool warmup_en; /* false: normal mode, true: enable pattern generator */
bool warmup_mode; /* false: 420, true: 444 */
@@ -148,6 +209,28 @@ struct dwbc_funcs {
struct dwb_warmup_params *warmup_params);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+
+ void (*dwb_program_output_csc)(
+ struct dwbc *dwbc,
+ enum dc_color_space color_space,
+ enum dwb_output_csc_mode mode);
+
+ bool (*dwb_ogam_set_output_transfer_func)(
+ struct dwbc *dwbc,
+ const struct dc_transfer_func *in_transfer_func_dwb_ogam);
+
+ void (*get_privacy_mask)(
+ struct dwbc *dwbc, uint32_t *mask_id);
+
+ void (*set_privacy_mask)(
+ struct dwbc *dwbc, uint32_t mask_id);
+
+ //TODO: merge with output_transfer_func?
+ bool (*dwb_ogam_set_input_transfer_func)(
+ struct dwbc *dwbc,
+ const struct dc_transfer_func *in_transfer_func_dwb_ogam);
+#endif
bool (*get_dwb_status)(
struct dwbc *dwbc);
void (*dwb_set_scaler)(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 2cb8466e657b..286cceeb9c24 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -48,6 +48,10 @@ enum cursor_lines_per_chunk {
enum hubp_ind_block_size {
hubp_ind_block_unconstrained = 0,
hubp_ind_block_64b,
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ hubp_ind_block_128b,
+ hubp_ind_block_64b_no_128bcl,
+#endif
};
struct hubp {
@@ -104,9 +108,6 @@ struct hubp_funcs {
const struct rect *viewport,
const struct rect *viewport_c);
- void (*apply_PLAT_54186_wa)(struct hubp *hubp,
- const struct dc_plane_address *address);
-
bool (*hubp_program_surface_flip_and_addr)(
struct hubp *hubp,
const struct dc_plane_address *address,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index 75d419081e76..f62ccf242f56 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -150,6 +150,15 @@ enum ipp_degamma_mode {
IPP_DEGAMMA_MODE_USER_PWL
};
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+enum gamcor_mode {
+ GAMCOR_MODE_BYPASS,
+ GAMCOR_MODE_RESERVED_1,
+ GAMCOR_MODE_USER_PWL,
+ GAMCOR_MODE_RESERVED_3
+};
+#endif
+
enum ipp_output_format {
IPP_OUTPUT_FORMAT_12_BIT_FIX,
IPP_OUTPUT_FORMAT_16_BIT_BYPASS,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mcif_wb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mcif_wb.h
index a5c8d92fc5c2..aeab7fd782b8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mcif_wb.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mcif_wb.h
@@ -43,6 +43,9 @@ struct mcif_arb_params {
unsigned int arbitration_slice;
unsigned int slice_lines;
unsigned int max_scaled_time;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ unsigned int dram_speed_change_duration;
+#endif
};
struct mcif_irq_params {
@@ -72,6 +75,11 @@ struct mcif_wb {
struct mcif_wb_funcs {
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ void (*warmup_mcif)(
+ struct mcif_wb *mcif_wb,
+ struct mcif_warmup_params *params);
+#endif
void (*enable_mcif)(struct mcif_wb *mcif_wb);
void (*disable_mcif)(struct mcif_wb *mcif_wb);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 50ee8aa7ec3b..b2892eab5e02 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -27,11 +27,16 @@
#include "dc_hw_types.h"
#include "hw_shared.h"
+#include "transform.h"
#define MAX_MPCC 6
#define MAX_OPP 6
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define MAX_DWB 2
+#else
#define MAX_DWB 1
+#endif
enum mpc_output_csc_mode {
MPC_OUTPUT_CSC_DISABLE = 0,
@@ -72,6 +77,12 @@ struct mpcc_blnd_cfg {
int bottom_outside_gain;
};
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+struct mpc_grph_gamut_adjustment {
+ struct fixed31_32 temperature_matrix[CSC_TEMPERATURE_MATRIX_SIZE];
+ enum graphics_gamut_adjust_type gamut_adjust_type;
+};
+#endif
struct mpcc_sm_cfg {
bool enable;
/* 0-single plane,2-row subsampling,4-column subsampling,6-checkboard subsampling */
@@ -95,6 +106,13 @@ struct mpc_denorm_clamp {
int clamp_min_b_cb;
};
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+struct mpc_dwb_flow_control {
+ int flow_ctrl_mode;
+ int flow_ctrl_cnt0;
+ int flow_ctrl_cnt1;
+};
+#endif
/*
* MPCC connection and blending configuration for a single MPCC instance.
* This struct is used as a node in an MPC tree.
@@ -105,6 +123,9 @@ struct mpcc {
struct mpcc *mpcc_bot; /* pointer to bottom layer MPCC. NULL when not connected */
struct mpcc_blnd_cfg blnd_cfg; /* The blending configuration for this MPCC */
struct mpcc_sm_cfg sm_cfg; /* stereo mix setting for this MPCC */
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ bool shared_bottom; /* TRUE if MPCC output to both OPP and DWB endpoints, else FALSE */
+#endif
};
/*
@@ -226,6 +247,50 @@ struct mpc_funcs {
int opp_id,
bool lock);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ /*
+ * Add DPP into 'secondary' MPC tree based on specified blending position.
+ * Only used for planes that are part of blending chain for DWB output
+ *
+ * Parameters:
+ * [in/out] mpc - MPC context.
+ * [in/out] tree - MPC tree structure that plane will be added to.
+ * [in] blnd_cfg - MPCC blending configuration for the new blending layer.
+ * [in] sm_cfg - MPCC stereo mix configuration for the new blending layer.
+ * stereo mix must be disabled for the very bottom layer of the tree config.
+ * [in] insert_above_mpcc - Insert new plane above this MPCC. If NULL, insert as bottom plane.
+ * [in] dpp_id - DPP instance for the plane to be added.
+ * [in] mpcc_id - The MPCC physical instance to use for blending.
+ *
+ * Return: struct mpcc* - MPCC that was added.
+ */
+ struct mpcc* (*insert_plane_to_secondary)(
+ struct mpc *mpc,
+ struct mpc_tree *tree,
+ struct mpcc_blnd_cfg *blnd_cfg,
+ struct mpcc_sm_cfg *sm_cfg,
+ struct mpcc *insert_above_mpcc,
+ int dpp_id,
+ int mpcc_id);
+
+ /*
+ * Remove a specified DPP from the 'secondary' MPC tree.
+ *
+ * Parameters:
+ * [in/out] mpc - MPC context.
+ * [in/out] tree - MPC tree structure that plane will be removed from.
+ * [in] mpcc - MPCC to be removed from tree.
+ * Return: void
+ */
+ void (*remove_mpcc_from_secondary)(
+ struct mpc *mpc,
+ struct mpc_tree *tree,
+ struct mpcc *mpcc);
+
+ struct mpcc* (*get_mpcc_for_dpp_from_secondary)(
+ struct mpc_tree *tree,
+ int dpp_id);
+#endif
struct mpcc* (*get_mpcc_for_dpp)(
struct mpc_tree *tree,
int dpp_id);
@@ -265,6 +330,49 @@ struct mpc_funcs {
struct mpc *mpc,
int mpcc_id,
bool power_on);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ void (*set_dwb_mux)(
+ struct mpc *mpc,
+ int dwb_id,
+ int mpcc_id);
+
+ void (*disable_dwb_mux)(
+ struct mpc *mpc,
+ int dwb_id);
+
+ bool (*is_dwb_idle)(
+ struct mpc *mpc,
+ int dwb_id);
+
+ void (*set_out_rate_control)(
+ struct mpc *mpc,
+ int opp_id,
+ bool enable,
+ bool rate_2x_mode,
+ struct mpc_dwb_flow_control *flow_control);
+#endif
+
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ void (*set_gamut_remap)(
+ struct mpc *mpc,
+ int mpcc_id,
+ const struct mpc_grph_gamut_adjustment *adjust);
+
+ bool (*program_shaper)(
+ struct mpc *mpc,
+ const struct pwl_params *params,
+ uint32_t rmu_idx);
+
+ uint32_t (*acquire_rmu)(struct mpc *mpc, int mpcc_id, int rmu_idx);
+
+ bool (*program_3dlut)(
+ struct mpc *mpc,
+ const struct tetrahedral_params *params,
+ int rmu_idx);
+
+ int (*release_rmu)(struct mpc *mpc, int mpcc_id);
+
+#endif
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index ac6523c0828e..11ce06e69d3f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -104,6 +104,10 @@ struct stream_encoder {
struct dc_bios *bp;
enum engine_id id;
uint32_t stream_enc_inst;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ struct vpg *vpg;
+ struct afmt *afmt;
+#endif
};
struct enc_state {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index f803191e3134..084432132b16 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -98,9 +98,19 @@ enum crc_selection {
INTERSECT_WINDOW_NOT_A_NOT_B,
};
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+enum otg_out_mux_dest {
+ OUT_MUX_DIO = 0,
+};
+#endif
+
enum h_timing_div_mode {
H_TIMING_NO_DIV,
H_TIMING_DIV_BY2,
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ H_TIMING_RESERVED,
+ H_TIMING_DIV_BY4,
+#endif
};
struct crc_params {
@@ -278,6 +288,15 @@ struct timing_generator_funcs {
void (*set_gsl_source_select)(struct timing_generator *optc,
int group_idx,
uint32_t gsl_ready_signal);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ void (*set_out_mux)(struct timing_generator *tg, enum otg_out_mux_dest dest);
+ void (*set_vrr_m_const)(struct timing_generator *optc,
+ double vtotal_avg);
+ void (*set_drr_trigger_window)(struct timing_generator *optc,
+ uint32_t window_start, uint32_t window_end);
+ void (*set_vtotal_change_limit)(struct timing_generator *optc,
+ uint32_t limit);
+#endif
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 8e72f077e552..066a2a723c12 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -203,6 +203,12 @@ struct hw_sequencer_funcs {
void (*set_abm_immediate_disable)(struct pipe_ctx *pipe_ctx);
+ void (*set_pipe)(struct pipe_ctx *pipe_ctx);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ /* Idle Optimization Related */
+ bool (*apply_idle_power_optimizations)(struct dc *dc, bool enable);
+#endif
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index a9be495af922..dbd74d548de3 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -48,6 +48,9 @@ struct resource_caps {
int num_ddc;
int num_vmid;
int num_dsc;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ int num_mpc_3dlut;
+#endif
};
struct resource_straps {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
index 0f682ac53bb2..3352b79fb1cb 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
@@ -83,3 +83,13 @@ AMD_DAL_IRQ_DCN21= $(addprefix $(AMDDALPATH)/dc/irq/dcn21/,$(IRQ_DCN21))
AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN21)
endif
+###############################################################################
+# DCN 30
+###############################################################################
+ifdef CONFIG_DRM_AMD_DC_DCN3_0
+IRQ_DCN3 = irq_service_dcn30.o
+
+AMD_DAL_IRQ_DCN3 = $(addprefix $(AMDDALPATH)/dc/irq/dcn30/,$(IRQ_DCN3))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN3)
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
new file mode 100644
index 000000000000..dd4d7a1dc3b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+
+#include "dm_services.h"
+
+#include "include/logger_interface.h"
+
+#include "../dce110/irq_service_dce110.h"
+
+
+#include "sienna_cichlid_ip_offset.h"
+#include "dcn/dcn_3_0_0_offset.h"
+#include "dcn/dcn_3_0_0_sh_mask.h"
+
+#include "nbio/nbio_7_4_offset.h"
+
+#include "dcn/dpcs_3_0_0_offset.h"
+#include "dcn/dpcs_3_0_0_sh_mask.h"
+
+#include "mmhub/mmhub_2_0_0_offset.h"
+#include "mmhub/mmhub_2_0_0_sh_mask.h"
+
+#include "irq_service_dcn30.h"
+
+#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
+
+enum dc_irq_source to_dal_irq_source_dcn30(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
+{
+ switch (src_id) {
+ case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK1;
+ case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK2;
+ case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK3;
+ case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK4;
+ case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK5;
+ case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK6;
+ case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP1;
+ case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP2;
+ case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP3;
+ case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP4;
+ case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP5;
+ case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP6;
+ case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE1;
+ case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE2;
+ case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE3;
+ case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE4;
+ case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE5;
+ case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE6;
+
+ case DCN_1_0__SRCID__DC_HPD1_INT:
+ /* generic src_id for all HPD and HPDRX interrupts */
+ switch (ext_id) {
+ case DCN_1_0__CTXID__DC_HPD1_INT:
+ return DC_IRQ_SOURCE_HPD1;
+ case DCN_1_0__CTXID__DC_HPD2_INT:
+ return DC_IRQ_SOURCE_HPD2;
+ case DCN_1_0__CTXID__DC_HPD3_INT:
+ return DC_IRQ_SOURCE_HPD3;
+ case DCN_1_0__CTXID__DC_HPD4_INT:
+ return DC_IRQ_SOURCE_HPD4;
+ case DCN_1_0__CTXID__DC_HPD5_INT:
+ return DC_IRQ_SOURCE_HPD5;
+ case DCN_1_0__CTXID__DC_HPD6_INT:
+ return DC_IRQ_SOURCE_HPD6;
+ case DCN_1_0__CTXID__DC_HPD1_RX_INT:
+ return DC_IRQ_SOURCE_HPD1RX;
+ case DCN_1_0__CTXID__DC_HPD2_RX_INT:
+ return DC_IRQ_SOURCE_HPD2RX;
+ case DCN_1_0__CTXID__DC_HPD3_RX_INT:
+ return DC_IRQ_SOURCE_HPD3RX;
+ case DCN_1_0__CTXID__DC_HPD4_RX_INT:
+ return DC_IRQ_SOURCE_HPD4RX;
+ case DCN_1_0__CTXID__DC_HPD5_RX_INT:
+ return DC_IRQ_SOURCE_HPD5RX;
+ case DCN_1_0__CTXID__DC_HPD6_RX_INT:
+ return DC_IRQ_SOURCE_HPD6RX;
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+ break;
+
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
+static bool hpd_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ HPD0_DC_HPD_INT_STATUS,
+ DC_HPD_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ HPD0_DC_HPD_INT_CONTROL,
+ DC_HPD_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
+
+static const struct irq_source_info_funcs hpd_irq_info_funcs = {
+ .set = NULL,
+ .ack = hpd_ack
+};
+
+static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs pflip_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+#undef BASE_INNER
+#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
+
+/* Expand the base address at compile time. */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+
+#define SRI(reg_name, block, id)\
+ BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+
+#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
+ .enable_reg = SRI(reg1, block, reg_num),\
+ .enable_mask = \
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ .enable_value = {\
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
+ },\
+ .ack_reg = SRI(reg2, block, reg_num),\
+ .ack_mask = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
+ .ack_value = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
+
+
+
+#define hpd_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1 + reg_num] = {\
+ IRQ_REG_ENTRY(HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
+ .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
+ .funcs = &hpd_irq_info_funcs\
+ }
+
+#define hpd_rx_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
+ IRQ_REG_ENTRY(HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
+ .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
+ .funcs = &hpd_rx_irq_info_funcs\
+ }
+#define pflip_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
+ IRQ_REG_ENTRY(HUBPREQ, reg_num,\
+ DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
+ DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
+ .funcs = &pflip_irq_info_funcs\
+ }
+
+#define vupdate_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
+
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
+
+#define dummy_irq_entry() \
+ {\
+ .funcs = &dummy_irq_info_funcs\
+ }
+
+#define i2c_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
+
+#define dp_sink_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
+
+#define gpio_pad_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
+
+#define dc_underflow_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
+
+static const struct irq_source_info_funcs dummy_irq_info_funcs = {
+ .set = dal_irq_service_dummy_set,
+ .ack = dal_irq_service_dummy_ack
+};
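For clarity, a hedged illustration of what the token-pasting macros above produce; the mmHPD0_*/HPD0_* symbol names are assumed to come from the dcn_3_0_0 register headers included at the top of this file, and only the shape of the expansion matters here:

/* Rough expansion of hpd_int_entry(0), illustration only. */
[DC_IRQ_SOURCE_HPD1 + 0] = {
	.enable_reg   = BASE(mmHPD0_DC_HPD_INT_CONTROL_BASE_IDX) + mmHPD0_DC_HPD_INT_CONTROL,
	.enable_mask  = HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK,
	.enable_value = { HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK,
			  ~HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK },
	.ack_reg      = BASE(mmHPD0_DC_HPD_INT_CONTROL_BASE_IDX) + mmHPD0_DC_HPD_INT_CONTROL,
	.ack_mask     = HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK,
	.ack_value    = HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK,
	.status_reg   = BASE(mmHPD0_DC_HPD_INT_STATUS_BASE_IDX) + mmHPD0_DC_HPD_INT_STATUS,
	.funcs        = &hpd_irq_info_funcs,
},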
+
+static const struct irq_source_info
+irq_source_info_dcn30[DAL_IRQ_SOURCES_NUMBER] = {
+ [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
+ hpd_int_entry(0),
+ hpd_int_entry(1),
+ hpd_int_entry(2),
+ hpd_int_entry(3),
+ hpd_int_entry(4),
+ hpd_int_entry(5),
+ hpd_rx_int_entry(0),
+ hpd_rx_int_entry(1),
+ hpd_rx_int_entry(2),
+ hpd_rx_int_entry(3),
+ hpd_rx_int_entry(4),
+ hpd_rx_int_entry(5),
+ i2c_int_entry(1),
+ i2c_int_entry(2),
+ i2c_int_entry(3),
+ i2c_int_entry(4),
+ i2c_int_entry(5),
+ i2c_int_entry(6),
+ dp_sink_int_entry(1),
+ dp_sink_int_entry(2),
+ dp_sink_int_entry(3),
+ dp_sink_int_entry(4),
+ dp_sink_int_entry(5),
+ dp_sink_int_entry(6),
+ [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
+ pflip_int_entry(0),
+ pflip_int_entry(1),
+ pflip_int_entry(2),
+ pflip_int_entry(3),
+ [DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
+ gpio_pad_int_entry(0),
+ gpio_pad_int_entry(1),
+ gpio_pad_int_entry(2),
+ gpio_pad_int_entry(3),
+ gpio_pad_int_entry(4),
+ gpio_pad_int_entry(5),
+ gpio_pad_int_entry(6),
+ gpio_pad_int_entry(7),
+ gpio_pad_int_entry(8),
+ gpio_pad_int_entry(9),
+ gpio_pad_int_entry(10),
+ gpio_pad_int_entry(11),
+ gpio_pad_int_entry(12),
+ gpio_pad_int_entry(13),
+ gpio_pad_int_entry(14),
+ gpio_pad_int_entry(15),
+ gpio_pad_int_entry(16),
+ gpio_pad_int_entry(17),
+ gpio_pad_int_entry(18),
+ gpio_pad_int_entry(19),
+ gpio_pad_int_entry(20),
+ gpio_pad_int_entry(21),
+ gpio_pad_int_entry(22),
+ gpio_pad_int_entry(23),
+ gpio_pad_int_entry(24),
+ gpio_pad_int_entry(25),
+ gpio_pad_int_entry(26),
+ gpio_pad_int_entry(27),
+ gpio_pad_int_entry(28),
+ gpio_pad_int_entry(29),
+ gpio_pad_int_entry(30),
+ dc_underflow_int_entry(1),
+ dc_underflow_int_entry(2),
+ dc_underflow_int_entry(3),
+ dc_underflow_int_entry(4),
+ dc_underflow_int_entry(5),
+ dc_underflow_int_entry(6),
+ [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
+ vupdate_int_entry(0),
+ vupdate_int_entry(1),
+ vupdate_int_entry(2),
+ vupdate_int_entry(3),
+ vupdate_int_entry(4),
+ vupdate_int_entry(5),
+ vblank_int_entry(0),
+ vblank_int_entry(1),
+ vblank_int_entry(2),
+ vblank_int_entry(3),
+ vblank_int_entry(4),
+ vblank_int_entry(5),
+};
+
+static const struct irq_service_funcs irq_service_funcs_dcn30 = {
+ .to_dal_irq_source = to_dal_irq_source_dcn30
+};
+
+static void dcn30_irq_construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
+{
+ dal_irq_service_construct(irq_service, init_data);
+
+ irq_service->info = irq_source_info_dcn30;
+ irq_service->funcs = &irq_service_funcs_dcn30;
+}
+
+struct irq_service *dal_irq_service_dcn30_create(
+ struct irq_service_init_data *init_data)
+{
+ struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
+ GFP_KERNEL);
+
+ if (!irq_service)
+ return NULL;
+
+ dcn30_irq_construct(irq_service, init_data);
+ return irq_service;
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.h b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.h
new file mode 100644
index 000000000000..5a00acaa1a18
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+
+#ifndef __DAL_IRQ_SERVICE_DCN30_H__
+#define __DAL_IRQ_SERVICE_DCN30_H__
+
+#include "../irq_service.h"
+
+struct irq_service *dal_irq_service_dcn30_create(
+ struct irq_service_init_data *init_data);
+
+#endif /* __DAL_IRQ_SERVICE_DCN30_H__ */
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index 6d7bca562eec..c3bbfe397e8d 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -111,7 +111,15 @@
#define ASSERT(expr) WARN_ON_ONCE(!(expr))
#endif
-#define BREAK_TO_DEBUGGER() ASSERT(0)
+#if defined(CONFIG_DEBUG_KERNEL_DC) && (defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB))
+#define BREAK_TO_DEBUGGER() \
+ do { \
+ DRM_DEBUG_DRIVER("%s():%d\n", __func__, __LINE__); \
+ kgdb_breakpoint(); \
+ } while (0)
+#else
+#define BREAK_TO_DEBUGGER() DRM_DEBUG_DRIVER("%s():%d\n", __func__, __LINE__)
+#endif
#define DC_ERR(...) do { \
dm_error(__VA_ARGS__); \
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 26d94eb5ab58..3cac170312fc 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -64,11 +64,7 @@
* other component within DAL.
*/
-#include "inc/dmub_types.h"
#include "inc/dmub_cmd.h"
-#include "inc/dmub_gpint_cmd.h"
-#include "inc/dmub_cmd_dal.h"
-#include "inc/dmub_rb.h"
#if defined(__cplusplus)
extern "C" {
@@ -92,6 +88,9 @@ enum dmub_asic {
DMUB_ASIC_NONE = 0,
DMUB_ASIC_DCN20,
DMUB_ASIC_DCN21,
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ DMUB_ASIC_DCN30,
+#endif
DMUB_ASIC_MAX,
};
@@ -257,13 +256,18 @@ struct dmub_srv_hw_funcs {
void (*set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset);
+ uint32_t (*emul_get_inbox1_rptr)(struct dmub_srv *dmub);
+
+ void (*emul_set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset);
+
bool (*is_supported)(struct dmub_srv *dmub);
bool (*is_hw_init)(struct dmub_srv *dmub);
- bool (*is_phy_init)(struct dmub_srv *dmub);
+ void (*enable_dmub_boot_options)(struct dmub_srv *dmub);
+
+ union dmub_fw_boot_status (*get_fw_status)(struct dmub_srv *dmub);
- bool (*is_auto_load_done)(struct dmub_srv *dmub);
void (*set_gpint)(struct dmub_srv *dmub,
union dmub_gpint_data_register reg);
@@ -280,6 +284,7 @@ struct dmub_srv_hw_funcs {
* @hw_funcs: optional overrides for hw funcs
* @user_ctx: context data for callback funcs
* @asic: driver supplied asic
+ * @fw_version: the current firmware version, if any
* @is_virtual: false for hw support only
*/
struct dmub_srv_create_params {
@@ -287,6 +292,7 @@ struct dmub_srv_create_params {
struct dmub_srv_hw_funcs *hw_funcs;
void *user_ctx;
enum dmub_asic asic;
+ uint32_t fw_version;
bool is_virtual;
};
@@ -310,12 +316,14 @@ struct dmub_srv_hw_params {
* struct dmub_srv - software state for dmcub
* @asic: dmub asic identifier
* @user_ctx: user provided context for the dmub_srv
+ * @fw_version: the current firmware version, if any
* @is_virtual: false if hardware support only
* @fw_state: dmub firmware state pointer
*/
struct dmub_srv {
enum dmub_asic asic;
void *user_ctx;
+ uint32_t fw_version;
bool is_virtual;
struct dmub_fb scratch_mem_fb;
volatile const struct dmub_fw_state *fw_state;
@@ -336,6 +344,13 @@ struct dmub_srv {
};
/**
+ * DMUB firmware version helper macro - useful for checking the running
+ * firmware version to determine whether a feature or functionality is
+ * supported or present.
+ */
+#define DMUB_FW_VERSION(major, minor, revision) \
+ ((((major) & 0xFF) << 24) | (((minor) & 0xFF) << 16) | ((revision) & 0xFFFF))
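A minimal usage sketch: the packed value supports ordered comparisons against a required version, which is exactly how the DCN20 window setup later in this patch gates CW4 support:

/* Illustrative only: enable a feature when the firmware is new enough. */
if (dmub->fw_version > DMUB_FW_VERSION(1, 0, 10)) {
	/* e.g. program CW4 instead of the legacy region 4 */
}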
+
+/**
* dmub_srv_create() - creates the DMUB service.
* @dmub: the dmub service
* @params: creation parameters for the service
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 599bf2055bcb..68b5fd811d26 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -26,17 +26,253 @@
#ifndef _DMUB_CMD_H_
#define _DMUB_CMD_H_
-#include "dmub_types.h"
-#include "dmub_cmd_dal.h"
-#include "dmub_cmd_vbios.h"
+#include <asm/byteorder.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <stdarg.h>
+
#include "atomfirmware.h"
+/* Firmware versioning. */
+#ifdef DMUB_EXPOSE_VERSION
+#define DMUB_FW_VERSION_GIT_HASH 0xf87bb940b
+#define DMUB_FW_VERSION_MAJOR 1
+#define DMUB_FW_VERSION_MINOR 0
+#define DMUB_FW_VERSION_REVISION 19
+#define DMUB_FW_VERSION_UCODE ((DMUB_FW_VERSION_MAJOR << 24) | (DMUB_FW_VERSION_MINOR << 16) | DMUB_FW_VERSION_REVISION)
+#endif
+
+//<DMUB_TYPES>==================================================================
+/* Basic type definitions. */
+
+#define SET_ABM_PIPE_GRADUALLY_DISABLE 0
+#define SET_ABM_PIPE_IMMEDIATELY_DISABLE 255
+#define SET_ABM_PIPE_NORMAL 1
+
+/* Maximum number of streams on any ASIC. */
+#define DMUB_MAX_STREAMS 6
+
+/* Maximum number of planes on any ASIC. */
+#define DMUB_MAX_PLANES 6
+
+#ifndef PHYSICAL_ADDRESS_LOC
+#define PHYSICAL_ADDRESS_LOC union large_integer
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#ifndef dmub_memcpy
+#define dmub_memcpy(dest, source, bytes) memcpy((dest), (source), (bytes))
+#endif
+
+#ifndef dmub_memset
+#define dmub_memset(dest, val, bytes) memset((dest), (val), (bytes))
+#endif
+
+#ifndef dmub_udelay
+#define dmub_udelay(microseconds) udelay(microseconds)
+#endif
+
+union dmub_addr {
+ struct {
+ uint32_t low_part;
+ uint32_t high_part;
+ } u;
+ uint64_t quad_part;
+};
+
+union dmub_psr_debug_flags {
+ struct {
+ uint32_t visual_confirm : 1;
+ uint32_t use_hw_lock_mgr : 1;
+ } bitfields;
+
+ uint32_t u32All;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+
+
+//==============================================================================
+//</DMUB_TYPES>=================================================================
+//==============================================================================
+//< DMUB_META>==================================================================
+//==============================================================================
+#pragma pack(push, 1)
+
+/* Magic value for identifying dmub_fw_meta_info */
+#define DMUB_FW_META_MAGIC 0x444D5542
+
+/* Offset from the end of the file to the dmub_fw_meta_info */
+#define DMUB_FW_META_OFFSET 0x24
+
+/**
+ * struct dmub_fw_meta_info - metadata associated with fw binary
+ *
+ * NOTE: This should be considered a stable API. Fields should
+ * not be repurposed or reordered. New fields should be
+ * added instead to extend the structure.
+ *
+ * @magic_value: magic value identifying DMUB firmware meta info
+ * @fw_region_size: size of the firmware state region
+ * @trace_buffer_size: size of the tracebuffer region
+ * @fw_version: the firmware version information
+ */
+struct dmub_fw_meta_info {
+ uint32_t magic_value;
+ uint32_t fw_region_size;
+ uint32_t trace_buffer_size;
+ uint32_t fw_version;
+};
+
+/* Ensure that the structure remains 64 bytes. */
+union dmub_fw_meta {
+ struct dmub_fw_meta_info info;
+ uint8_t reserved[64];
+};
+
+#pragma pack(pop)
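For context, a hedged sketch of how a loader can locate this metadata; the real lookup is dmub_get_fw_meta_info() in dmub_srv.c, and the helper name below is hypothetical:

/* Illustrative lookup: the meta union is assumed to sit DMUB_FW_META_OFFSET
 * bytes before the end of the firmware blob.
 */
static const struct dmub_fw_meta_info *
example_find_fw_meta(const uint8_t *blob, uint32_t blob_size)
{
	const union dmub_fw_meta *meta;

	if (!blob || blob_size < sizeof(union dmub_fw_meta) + DMUB_FW_META_OFFSET)
		return NULL;

	meta = (const union dmub_fw_meta *)(blob + blob_size -
					    DMUB_FW_META_OFFSET -
					    sizeof(union dmub_fw_meta));

	if (meta->info.magic_value != DMUB_FW_META_MAGIC)
		return NULL;

	return &meta->info;
}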
+
+//==============================================================================
+//< DMUB_STATUS>================================================================
+//==============================================================================
+
+/**
+ * DMCUB scratch registers can be used to determine firmware status.
+ * Current scratch register usage is as follows:
+ *
+ * SCRATCH0: FW Boot Status register
+ * SCRATCH15: FW Boot Options register
+ */
+
+/**
+ * DMCUB firmware status bits for SCRATCH2.
+ */
+enum dmub_fw_status_bit {
+ DMUB_FW_STATUS_BIT_DAL_FIRMWARE = (1 << 0),
+ DMUB_FW_STATUS_BIT_COMMAND_TABLE_READY = (1 << 1),
+};
+
+
+/* Register bit definition for SCRATCH0 */
+union dmub_fw_boot_status {
+ struct {
+ uint32_t dal_fw : 1;
+ uint32_t mailbox_rdy : 1;
+ uint32_t optimized_init_done : 1;
+ uint32_t reserved : 29;
+ } bits;
+ uint32_t all;
+};
+
+enum dmub_fw_boot_status_bit {
+ DMUB_FW_BOOT_STATUS_BIT_DAL_FIRMWARE = (1 << 0),
+ DMUB_FW_BOOT_STATUS_BIT_MAILBOX_READY = (1 << 1),
+ DMUB_FW_BOOT_STATUS_BIT_OPTIMIZED_INIT_DONE = (1 << 2),
+};
+
+/* Register bit definition for SCRATCH15 */
+union dmub_fw_boot_options {
+ struct {
+ uint32_t pemu_env : 1;
+ uint32_t fpga_env : 1;
+ uint32_t optimized_init : 1;
+ uint32_t reserved : 29;
+ } bits;
+ uint32_t all;
+};
+
+enum dmub_fw_boot_options_bit {
+ DMUB_FW_BOOT_OPTION_BIT_PEMU_ENV = (1 << 0),
+ DMUB_FW_BOOT_OPTION_BIT_FPGA_ENV = (1 << 1),
+ DMUB_FW_BOOT_OPTION_BIT_OPTIMIZED_INIT_DONE = (1 << 2),
+};
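A hedged sketch of how a driver consumes these bits through the get_fw_status hook; the reworked dmub_srv_wait_for_auto_load() later in this patch polls in essentially the same way:

/* Illustrative check; timeout handling omitted. */
union dmub_fw_boot_status status = dmub->hw_funcs.get_fw_status(dmub);

if (status.bits.dal_fw && status.bits.mailbox_rdy) {
	/* firmware is running and the inbox can accept commands */
}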
+
+//==============================================================================
+//</DMUB_STATUS>================================================================
+//==============================================================================
+//< DMUB_VBIOS>=================================================================
+//==============================================================================
+
+/*
+ * Command IDs should be treated as stable ABI.
+ * Do not reuse or modify IDs.
+ */
+
+enum dmub_cmd_vbios_type {
+ DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL = 0,
+ DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL = 1,
+ DMUB_CMD__VBIOS_SET_PIXEL_CLOCK = 2,
+ DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING = 3,
+};
+
+//==============================================================================
+//</DMUB_VBIOS>=================================================================
+//==============================================================================
+//< DMUB_GPINT>=================================================================
+//==============================================================================
+
+/**
+ * The shifts and masks below may alternatively be used to format and read
+ * the command register bits.
+ */
+
+#define DMUB_GPINT_DATA_PARAM_MASK 0xFFFF
+#define DMUB_GPINT_DATA_PARAM_SHIFT 0
+
+#define DMUB_GPINT_DATA_COMMAND_CODE_MASK 0xFFF
+#define DMUB_GPINT_DATA_COMMAND_CODE_SHIFT 16
+
+#define DMUB_GPINT_DATA_STATUS_MASK 0xF
+#define DMUB_GPINT_DATA_STATUS_SHIFT 28
+
+/**
+ * Command responses.
+ */
+
+#define DMUB_GPINT__STOP_FW_RESPONSE 0xDEADDEAD
+
+/**
+ * The register format for sending a command via the GPINT.
+ */
+union dmub_gpint_data_register {
+ struct {
+ uint32_t param : 16;
+ uint32_t command_code : 12;
+ uint32_t status : 4;
+ } bits;
+ uint32_t all;
+};
+
+/*
+ * Command IDs should be treated as stable ABI.
+ * Do not reuse or modify IDs.
+ */
+
+enum dmub_gpint_command {
+ DMUB_GPINT__INVALID_COMMAND = 0,
+ DMUB_GPINT__GET_FW_VERSION = 1,
+ DMUB_GPINT__STOP_FW = 2,
+ DMUB_GPINT__GET_PSR_STATE = 7,
+};
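The union and the shift/mask pair describe the same 32-bit layout; a hedged sketch of formatting a GET_FW_VERSION request both ways:

/* Illustrative only: two equivalent ways to build the GPINT register value. */
union dmub_gpint_data_register reg = { 0 };
uint32_t raw;

reg.bits.command_code = DMUB_GPINT__GET_FW_VERSION;
reg.bits.param = 0;

raw = ((DMUB_GPINT__GET_FW_VERSION & DMUB_GPINT_DATA_COMMAND_CODE_MASK)
	<< DMUB_GPINT_DATA_COMMAND_CODE_SHIFT) |
      ((0 & DMUB_GPINT_DATA_PARAM_MASK) << DMUB_GPINT_DATA_PARAM_SHIFT);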
+
+//==============================================================================
+//</DMUB_GPINT>=================================================================
+//==============================================================================
+//< DMUB_CMD>===================================================================
+//==============================================================================
+
#define DMUB_RB_CMD_SIZE 64
#define DMUB_RB_MAX_ENTRY 128
#define DMUB_RB_SIZE (DMUB_RB_CMD_SIZE * DMUB_RB_MAX_ENTRY)
#define REG_SET_MASK 0xFFFF
-
/*
* Command IDs should be treated as stable ABI.
* Do not reuse or modify IDs.
@@ -51,6 +287,7 @@ enum dmub_cmd_type {
DMUB_CMD__PLAT_54186_WA = 5,
DMUB_CMD__PSR = 64,
DMUB_CMD__ABM = 66,
+ DMUB_CMD__HW_LOCK = 69,
DMUB_CMD__VBIOS = 128,
};
@@ -106,14 +343,12 @@ struct dmub_cmd_reg_field_update_sequence {
};
#define DMUB_REG_FIELD_UPDATE_SEQ__MAX 7
-
struct dmub_rb_cmd_reg_field_update_sequence {
struct dmub_cmd_header header;
uint32_t addr;
struct dmub_cmd_reg_field_update_sequence seq[DMUB_REG_FIELD_UPDATE_SEQ__MAX];
};
-
/*
* Burst write
*
@@ -148,10 +383,6 @@ struct dmub_rb_cmd_reg_wait {
struct dmub_cmd_reg_wait_data reg_wait;
};
-#ifndef PHYSICAL_ADDRESS_LOC
-#define PHYSICAL_ADDRESS_LOC union large_integer
-#endif
-
struct dmub_cmd_PLAT_54186_wa {
uint32_t DCSURF_SURFACE_CONTROL;
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
@@ -215,7 +446,26 @@ struct dmub_rb_cmd_dpphy_init {
uint8_t reserved[60];
};
+/*
+ * Command IDs should be treated as stable ABI.
+ * Do not reuse or modify IDs.
+ */
+
+enum dmub_cmd_psr_type {
+ DMUB_CMD__PSR_SET_VERSION = 0,
+ DMUB_CMD__PSR_COPY_SETTINGS = 1,
+ DMUB_CMD__PSR_ENABLE = 2,
+ DMUB_CMD__PSR_DISABLE = 3,
+ DMUB_CMD__PSR_SET_LEVEL = 4,
+};
+
+enum psr_version {
+ PSR_VERSION_1 = 0,
+ PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF,
+};
+
struct dmub_cmd_psr_copy_settings_data {
+ union dmub_psr_debug_flags debug;
uint16_t psr_level;
uint8_t dpp_inst;
uint8_t mpcc_inst;
@@ -228,7 +478,9 @@ struct dmub_cmd_psr_copy_settings_data {
uint8_t smu_optimizations_en;
uint8_t frame_delay;
uint8_t frame_cap_ind;
- struct dmub_psr_debug_flags debug;
+ uint8_t pad[3];
+ uint16_t init_sdp_deadline;
+ uint16_t pad2;
};
struct dmub_rb_cmd_psr_copy_settings {
@@ -238,6 +490,7 @@ struct dmub_rb_cmd_psr_copy_settings {
struct dmub_cmd_psr_set_level_data {
uint16_t psr_level;
+ uint8_t pad[2];
};
struct dmub_rb_cmd_psr_set_level {
@@ -258,11 +511,93 @@ struct dmub_rb_cmd_psr_set_version {
struct dmub_cmd_psr_set_version_data psr_set_version_data;
};
+union dmub_hw_lock_flags {
+ struct {
+ uint8_t lock_pipe : 1;
+ uint8_t lock_cursor : 1;
+ uint8_t lock_dig : 1;
+ uint8_t triple_buffer_lock : 1;
+ } bits;
+
+ uint8_t u8All;
+};
+
+struct dmub_hw_lock_inst_flags {
+ uint8_t otg_inst;
+ uint8_t opp_inst;
+ uint8_t dig_inst;
+ uint8_t pad;
+};
+
+enum hw_lock_client {
+ HW_LOCK_CLIENT_DRIVER = 0,
+ HW_LOCK_CLIENT_FW,
+ HW_LOCK_CLIENT_INVALID = 0xFFFFFFFF,
+};
+
+struct dmub_cmd_lock_hw_data {
+ enum hw_lock_client client;
+ struct dmub_hw_lock_inst_flags inst_flags;
+ union dmub_hw_lock_flags hw_locks;
+ uint8_t lock;
+ uint8_t should_release;
+ uint8_t pad;
+};
+
+struct dmub_rb_cmd_lock_hw {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_lock_hw_data lock_hw_data;
+};
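A hedged sketch of a driver-side client filling this command; the dmub_cmd_header field names (type, payload_bytes) are assumed from the common command layout, and the submission path itself is not shown:

/* Illustrative only: request a pipe + cursor lock on OTG instance 0. */
union dmub_rb_cmd cmd = { 0 };

cmd.lock_hw.header.type = DMUB_CMD__HW_LOCK;                          /* assumed field */
cmd.lock_hw.header.payload_bytes = sizeof(cmd.lock_hw.lock_hw_data);  /* assumed field */
cmd.lock_hw.lock_hw_data.client = HW_LOCK_CLIENT_DRIVER;
cmd.lock_hw.lock_hw_data.lock = 1;
cmd.lock_hw.lock_hw_data.inst_flags.otg_inst = 0;
cmd.lock_hw.lock_hw_data.hw_locks.bits.lock_pipe = 1;
cmd.lock_hw.lock_hw_data.hw_locks.bits.lock_cursor = 1;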
+
+enum dmub_cmd_abm_type {
+ DMUB_CMD__ABM_INIT_CONFIG = 0,
+ DMUB_CMD__ABM_SET_PIPE = 1,
+ DMUB_CMD__ABM_SET_BACKLIGHT = 2,
+ DMUB_CMD__ABM_SET_LEVEL = 3,
+ DMUB_CMD__ABM_SET_AMBIENT_LEVEL = 4,
+ DMUB_CMD__ABM_SET_PWM_FRAC = 5,
+};
+
+#define NUM_AMBI_LEVEL 5
+#define NUM_AGGR_LEVEL 4
+#define NUM_POWER_FN_SEGS 8
+#define NUM_BL_CURVE_SEGS 16
+
+/*
+ * Parameters for ABM2.4 algorithm.
+ * Padded explicitly to 32-bit boundary.
+ */
+struct abm_config_table {
+ /* Parameters for crgb conversion */
+ uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; // 0B
+ uint16_t crgb_offset[NUM_POWER_FN_SEGS]; // 15B
+ uint16_t crgb_slope[NUM_POWER_FN_SEGS]; // 31B
+
+ /* Parameters for custom curve */
+ uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; // 47B
+ uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; // 79B
+
+ uint16_t ambient_thresholds_lux[NUM_AMBI_LEVEL]; // 111B
+ uint16_t min_abm_backlight; // 121B
+
+ uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 123B
+ uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 143B
+ uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 163B
+ uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 183B
+ uint8_t hybrid_factor[NUM_AGGR_LEVEL]; // 203B
+ uint8_t contrast_factor[NUM_AGGR_LEVEL]; // 207B
+ uint8_t deviation_gain[NUM_AGGR_LEVEL]; // 211B
+ uint8_t min_knee[NUM_AGGR_LEVEL]; // 215B
+ uint8_t max_knee[NUM_AGGR_LEVEL]; // 219B
+ uint8_t iir_curve[NUM_AMBI_LEVEL]; // 223B
+ uint8_t pad3[3]; // 228B
+};
+
struct dmub_cmd_abm_set_pipe_data {
- uint32_t ramping_boundary;
- uint32_t otg_inst;
- uint32_t panel_inst;
- uint32_t set_pipe_option;
+ uint8_t otg_inst;
+ uint8_t panel_inst;
+ uint8_t set_pipe_option;
+ uint8_t ramping_boundary; // TODO: Remove this
};
struct dmub_rb_cmd_abm_set_pipe {
@@ -272,6 +607,7 @@ struct dmub_rb_cmd_abm_set_pipe {
struct dmub_cmd_abm_set_backlight_data {
uint32_t frame_ramp;
+ uint32_t backlight_user_level;
};
struct dmub_rb_cmd_abm_set_backlight {
@@ -317,6 +653,7 @@ struct dmub_rb_cmd_abm_init_config {
};
union dmub_rb_cmd {
+ struct dmub_rb_cmd_lock_hw lock_hw;
struct dmub_rb_cmd_read_modify_write read_modify_write;
struct dmub_rb_cmd_reg_field_update_sequence reg_field_update_seq;
struct dmub_rb_cmd_burst_write burst_write;
@@ -342,4 +679,137 @@ union dmub_rb_cmd {
#pragma pack(pop)
+
+//==============================================================================
+//</DMUB_CMD>===================================================================
+//==============================================================================
+//< DMUB_RB>====================================================================
+//==============================================================================
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+struct dmub_rb_init_params {
+ void *ctx;
+ void *base_address;
+ uint32_t capacity;
+ uint32_t read_ptr;
+ uint32_t write_ptr;
+};
+
+struct dmub_rb {
+ void *base_address;
+ uint32_t data_count;
+ uint32_t rptr;
+ uint32_t wrpt;
+ uint32_t capacity;
+
+ void *ctx;
+ void *dmub;
+};
+
+
+static inline bool dmub_rb_empty(struct dmub_rb *rb)
+{
+ return (rb->wrpt == rb->rptr);
+}
+
+static inline bool dmub_rb_full(struct dmub_rb *rb)
+{
+ uint32_t data_count;
+
+ if (rb->wrpt >= rb->rptr)
+ data_count = rb->wrpt - rb->rptr;
+ else
+ data_count = rb->capacity - (rb->rptr - rb->wrpt);
+
+ return (data_count == (rb->capacity - DMUB_RB_CMD_SIZE));
+}
+
+static inline bool dmub_rb_push_front(struct dmub_rb *rb,
+ const union dmub_rb_cmd *cmd)
+{
+ uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t);
+ const uint64_t *src = (const uint64_t *)cmd;
+ int i;
+
+ if (dmub_rb_full(rb))
+ return false;
+
+ // copying data
+ for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+ *dst++ = *src++;
+
+ rb->wrpt += DMUB_RB_CMD_SIZE;
+
+ if (rb->wrpt >= rb->capacity)
+ rb->wrpt %= rb->capacity;
+
+ return true;
+}
+
+static inline bool dmub_rb_front(struct dmub_rb *rb,
+ union dmub_rb_cmd *cmd)
+{
+ uint8_t *rd_ptr = (uint8_t *)rb->base_address + rb->rptr;
+
+ if (dmub_rb_empty(rb))
+ return false;
+
+ dmub_memcpy(cmd, rd_ptr, DMUB_RB_CMD_SIZE);
+
+ return true;
+}
+
+static inline bool dmub_rb_pop_front(struct dmub_rb *rb)
+{
+ if (dmub_rb_empty(rb))
+ return false;
+
+ rb->rptr += DMUB_RB_CMD_SIZE;
+
+ if (rb->rptr >= rb->capacity)
+ rb->rptr %= rb->capacity;
+
+ return true;
+}
+
+static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
+{
+ uint32_t rptr = rb->rptr;
+ uint32_t wptr = rb->wrpt;
+
+ while (rptr != wptr) {
+ uint64_t volatile *data = (uint64_t volatile *)rb->base_address + rptr / sizeof(uint64_t);
+ //uint64_t volatile *p = (uint64_t volatile *)data;
+ uint64_t temp;
+ int i;
+
+ for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+ temp = *data++;
+
+ rptr += DMUB_RB_CMD_SIZE;
+ if (rptr >= rb->capacity)
+ rptr %= rb->capacity;
+ }
+}
+
+static inline void dmub_rb_init(struct dmub_rb *rb,
+ struct dmub_rb_init_params *init_params)
+{
+ rb->base_address = init_params->base_address;
+ rb->capacity = init_params->capacity;
+ rb->rptr = init_params->read_ptr;
+ rb->wrpt = init_params->write_ptr;
+}
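A hedged sketch of the producer flow these helpers are built for; in practice dmub_srv_cmd_queue() and dmub_srv_cmd_execute() wrap these steps:

/* Illustrative producer path: queue one command, then publish the write
 * pointer so the firmware can consume it.
 */
union dmub_rb_cmd cmd = { 0 };

if (dmub_rb_push_front(&dmub->inbox1_rb, &cmd)) {
	dmub_rb_flush_pending(&dmub->inbox1_rb);
	dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
}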
+
+#if defined(__cplusplus)
+}
+#endif
+
+//==============================================================================
+//</DMUB_RB>====================================================================
+//==============================================================================
+
#endif /* _DMUB_CMD_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h
deleted file mode 100644
index 652d6fc061b6..000000000000
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright 2019 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef _DMUB_GPINT_CMD_H_
-#define _DMUB_GPINT_CMD_H_
-
-#include "dmub_types.h"
-
-/**
- * The register format for sending a command via the GPINT.
- */
-union dmub_gpint_data_register {
- struct {
- uint32_t param : 16;
- uint32_t command_code : 12;
- uint32_t status : 4;
- } bits;
- uint32_t all;
-};
-
-/**
- * The shifts and masks below may alternatively be used to format and read
- * the command register bits.
- */
-
-#define DMUB_GPINT_DATA_PARAM_MASK 0xFFFF
-#define DMUB_GPINT_DATA_PARAM_SHIFT 0
-
-#define DMUB_GPINT_DATA_COMMAND_CODE_MASK 0xFFF
-#define DMUB_GPINT_DATA_COMMAND_CODE_SHIFT 16
-
-#define DMUB_GPINT_DATA_STATUS_MASK 0xF
-#define DMUB_GPINT_DATA_STATUS_SHIFT 28
-
-/*
- * Command IDs should be treated as stable ABI.
- * Do not reuse or modify IDs.
- */
-
-enum dmub_gpint_command {
- DMUB_GPINT__INVALID_COMMAND = 0,
- DMUB_GPINT__GET_FW_VERSION = 1,
- DMUB_GPINT__STOP_FW = 2,
- DMUB_GPINT__GET_PSR_STATE = 7,
-};
-
-/**
- * Command responses.
- */
-
-#define DMUB_GPINT__STOP_FW_RESPONSE 0xDEADDEAD
-
-#endif /* _DMUB_GPINT_CMD_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
deleted file mode 100644
index 2ae48c18bb5b..000000000000
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Copyright 2019 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef _DMUB_RB_H_
-#define _DMUB_RB_H_
-
-#include "dmub_types.h"
-#include "dmub_cmd.h"
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-struct dmub_rb_init_params {
- void *ctx;
- void *base_address;
- uint32_t capacity;
-};
-
-struct dmub_rb {
- void *base_address;
- uint32_t data_count;
- uint32_t rptr;
- uint32_t wrpt;
- uint32_t capacity;
-
- void *ctx;
- void *dmub;
-};
-
-
-static inline bool dmub_rb_empty(struct dmub_rb *rb)
-{
- return (rb->wrpt == rb->rptr);
-}
-
-static inline bool dmub_rb_full(struct dmub_rb *rb)
-{
- uint32_t data_count;
-
- if (rb->wrpt >= rb->rptr)
- data_count = rb->wrpt - rb->rptr;
- else
- data_count = rb->capacity - (rb->rptr - rb->wrpt);
-
- return (data_count == (rb->capacity - DMUB_RB_CMD_SIZE));
-}
-
-static inline bool dmub_rb_push_front(struct dmub_rb *rb,
- const union dmub_rb_cmd *cmd)
-{
- uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t);
- const uint64_t *src = (const uint64_t *)cmd;
- int i;
-
- if (dmub_rb_full(rb))
- return false;
-
- // copying data
- for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
- *dst++ = *src++;
-
- rb->wrpt += DMUB_RB_CMD_SIZE;
-
- if (rb->wrpt >= rb->capacity)
- rb->wrpt %= rb->capacity;
-
- return true;
-}
-
-static inline bool dmub_rb_front(struct dmub_rb *rb,
- union dmub_rb_cmd *cmd)
-{
- uint8_t *rd_ptr = (uint8_t *)rb->base_address + rb->rptr;
-
- if (dmub_rb_empty(rb))
- return false;
-
- dmub_memcpy(cmd, rd_ptr, DMUB_RB_CMD_SIZE);
-
- return true;
-}
-
-static inline bool dmub_rb_pop_front(struct dmub_rb *rb)
-{
- if (dmub_rb_empty(rb))
- return false;
-
- rb->rptr += DMUB_RB_CMD_SIZE;
-
- if (rb->rptr >= rb->capacity)
- rb->rptr %= rb->capacity;
-
- return true;
-}
-
-static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
-{
- uint32_t rptr = rb->rptr;
- uint32_t wptr = rb->wrpt;
-
- while (rptr != wptr) {
- uint64_t volatile *data = (uint64_t volatile *)rb->base_address + rptr / sizeof(uint64_t);
- //uint64_t volatile *p = (uint64_t volatile *)data;
- uint64_t temp;
- int i;
-
- for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
- temp = *data++;
-
- rptr += DMUB_RB_CMD_SIZE;
- if (rptr >= rb->capacity)
- rptr %= rb->capacity;
- }
-}
-
-static inline void dmub_rb_init(struct dmub_rb *rb,
- struct dmub_rb_init_params *init_params)
-{
- rb->base_address = init_params->base_address;
- rb->capacity = init_params->capacity;
- rb->rptr = 0;
- rb->wrpt = 0;
-}
-
-#if defined(__cplusplus)
-}
-#endif
-
-#endif /* _DMUB_RB_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/Makefile b/drivers/gpu/drm/amd/display/dmub/src/Makefile
index e08dfeea24b0..bb584f39cad0 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/Makefile
+++ b/drivers/gpu/drm/amd/display/dmub/src/Makefile
@@ -21,6 +21,9 @@
#
DMUB = dmub_srv.o dmub_reg.o dmub_dcn20.o dmub_dcn21.o
+ifdef CONFIG_DRM_AMD_DC_DCN3_0
+DMUB += dmub_dcn30.o
+endif
AMD_DAL_DMUB = $(addprefix $(AMDDALPATH)/dmub/src/,$(DMUB))
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index edc73d6d7ba2..0cd78e745e7e 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -215,11 +215,22 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
/* TODO: Move this to CW4. */
dmub_dcn20_translate_addr(&cw4->offset, fb_base, fb_offset, &offset);
- REG_WRITE(DMCUB_REGION4_OFFSET, offset.u.low_part);
- REG_WRITE(DMCUB_REGION4_OFFSET_HIGH, offset.u.high_part);
- REG_SET_2(DMCUB_REGION4_TOP_ADDRESS, 0, DMCUB_REGION4_TOP_ADDRESS,
- cw4->region.top - cw4->region.base - 1, DMCUB_REGION4_ENABLE,
- 1);
+ /* New firmware can support CW4. */
+ if (dmub->fw_version > DMUB_FW_VERSION(1, 0, 10)) {
+ REG_WRITE(DMCUB_REGION3_CW4_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW4_OFFSET_HIGH, offset.u.high_part);
+ REG_WRITE(DMCUB_REGION3_CW4_BASE_ADDRESS, cw4->region.base);
+ REG_SET_2(DMCUB_REGION3_CW4_TOP_ADDRESS, 0,
+ DMCUB_REGION3_CW4_TOP_ADDRESS, cw4->region.top,
+ DMCUB_REGION3_CW4_ENABLE, 1);
+ } else {
+ REG_WRITE(DMCUB_REGION4_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION4_OFFSET_HIGH, offset.u.high_part);
+ REG_SET_2(DMCUB_REGION4_TOP_ADDRESS, 0,
+ DMCUB_REGION4_TOP_ADDRESS,
+ cw4->region.top - cw4->region.base - 1,
+ DMCUB_REGION4_ENABLE, 1);
+ }
dmub_dcn20_translate_addr(&cw5->offset, fb_base, fb_offset, &offset);
@@ -243,9 +254,12 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1)
{
- /* TODO: Use CW4 instead of region 4. */
+ /* New firmware can support CW4 for the inbox. */
+ if (dmub->fw_version > DMUB_FW_VERSION(1, 0, 10))
+ REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, inbox1->base);
+ else
+ REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, 0x80000000);
- REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, 0x80000000);
REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
}
@@ -261,7 +275,11 @@ void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset)
bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub)
{
- return REG_READ(DMCUB_REGION3_CW2_BASE_ADDRESS) != 0;
+ uint32_t is_hw_init;
+
+ REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_hw_init);
+
+ return is_hw_init != 0;
}
bool dmub_dcn20_is_supported(struct dmub_srv *dmub)
@@ -294,3 +312,18 @@ uint32_t dmub_dcn20_get_gpint_response(struct dmub_srv *dmub)
{
return REG_READ(DMCUB_SCRATCH7);
}
+
+union dmub_fw_boot_status dmub_dcn20_get_fw_boot_status(struct dmub_srv *dmub)
+{
+ union dmub_fw_boot_status status;
+
+ status.all = REG_READ(DMCUB_SCRATCH0);
+ return status;
+}
+
+void dmub_dcn20_enable_dmub_boot_options(struct dmub_srv *dmub)
+{
+ union dmub_fw_boot_options boot_options = {0};
+
+ REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
+}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
index 7f046c73927e..a27b509cd6fd 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -26,7 +26,7 @@
#ifndef _DMUB_DCN20_H_
#define _DMUB_DCN20_H_
-#include "../inc/dmub_types.h"
+#include "../inc/dmub_cmd.h"
struct dmub_srv;
@@ -192,4 +192,8 @@ bool dmub_dcn20_is_gpint_acked(struct dmub_srv *dmub,
uint32_t dmub_dcn20_get_gpint_response(struct dmub_srv *dmub);
+void dmub_dcn20_enable_dmub_boot_options(struct dmub_srv *dmub);
+
+union dmub_fw_boot_status dmub_dcn20_get_fw_boot_status(struct dmub_srv *dmub);
+
#endif /* _DMUB_DCN20_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
index e8f488232e34..a6047673c3f5 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
@@ -51,14 +51,4 @@ const struct dmub_srv_common_regs dmub_srv_dcn21_regs = {
#undef DMUB_SF
};
-/* Shared functions. */
-bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub)
-{
- return (REG_READ(DMCUB_SCRATCH0) == 3);
-}
-
-bool dmub_dcn21_is_phy_init(struct dmub_srv *dmub)
-{
- return REG_READ(DMCUB_SCRATCH10) == 0;
-}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h
index 2bbea237137b..8c4033ae4007 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h
@@ -32,10 +32,4 @@
extern const struct dmub_srv_common_regs dmub_srv_dcn21_regs;
-/* Hardware functions. */
-
-bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub);
-
-bool dmub_dcn21_is_phy_init(struct dmub_srv *dmub);
-
#endif /* _DMUB_DCN21_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
new file mode 100644
index 000000000000..ba8d0bfb5522
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "../dmub_srv.h"
+#include "dmub_reg.h"
+#include "dmub_dcn20.h"
+
+#include "sienna_cichlid_ip_offset.h"
+#include "dcn/dcn_3_0_0_offset.h"
+#include "dcn/dcn_3_0_0_sh_mask.h"
+
+#define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg
+#define CTX dmub
+#define REGS dmub->regs
+
+/* Registers. */
+
+const struct dmub_srv_common_regs dmub_srv_dcn30_regs = {
+#define DMUB_SR(reg) REG_OFFSET(reg),
+ { DMUB_COMMON_REGS() },
+#undef DMUB_SR
+
+#define DMUB_SF(reg, field) FD_MASK(reg, field),
+ { DMUB_COMMON_FIELDS() },
+#undef DMUB_SF
+
+#define DMUB_SF(reg, field) FD_SHIFT(reg, field),
+ { DMUB_COMMON_FIELDS() },
+#undef DMUB_SF
+};
+
+/* Shared functions. */
+
+static void dmub_dcn30_get_fb_base_offset(struct dmub_srv *dmub,
+ uint64_t *fb_base,
+ uint64_t *fb_offset)
+{
+ uint32_t tmp;
+
+ if (dmub->fb_base || dmub->fb_offset) {
+ *fb_base = dmub->fb_base;
+ *fb_offset = dmub->fb_offset;
+ return;
+ }
+
+ REG_GET(DCN_VM_FB_LOCATION_BASE, FB_BASE, &tmp);
+ *fb_base = (uint64_t)tmp << 24;
+
+ REG_GET(DCN_VM_FB_OFFSET, FB_OFFSET, &tmp);
+ *fb_offset = (uint64_t)tmp << 24;
+}
+
+static inline void dmub_dcn30_translate_addr(const union dmub_addr *addr_in,
+ uint64_t fb_base,
+ uint64_t fb_offset,
+ union dmub_addr *addr_out)
+{
+ addr_out->quad_part = addr_in->quad_part - fb_base + fb_offset;
+}
+
+void dmub_dcn30_backdoor_load(struct dmub_srv *dmub,
+ const struct dmub_window *cw0,
+ const struct dmub_window *cw1)
+{
+ union dmub_addr offset;
+ uint64_t fb_base, fb_offset;
+
+ dmub_dcn30_get_fb_base_offset(dmub, &fb_base, &fb_offset);
+
+ REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
+
+ /* MEM_CNTL read/write space doesn't exist. */
+
+ dmub_dcn30_translate_addr(&cw0->offset, fb_base, fb_offset, &offset);
+
+ REG_WRITE(DMCUB_REGION3_CW0_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, offset.u.high_part);
+ REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base);
+ REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0,
+ DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top,
+ DMCUB_REGION3_CW0_ENABLE, 1);
+
+ dmub_dcn30_translate_addr(&cw1->offset, fb_base, fb_offset, &offset);
+
+ REG_WRITE(DMCUB_REGION3_CW1_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, offset.u.high_part);
+ REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base);
+ REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0,
+ DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top,
+ DMCUB_REGION3_CW1_ENABLE, 1);
+
+ REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID,
+ 0x20);
+}
+
+void dmub_dcn30_setup_windows(struct dmub_srv *dmub,
+ const struct dmub_window *cw2,
+ const struct dmub_window *cw3,
+ const struct dmub_window *cw4,
+ const struct dmub_window *cw5,
+ const struct dmub_window *cw6)
+{
+ union dmub_addr offset;
+
+ /* sienna_cichlid has hardwired virtual addressing for CW2-CW7 */
+
+ offset = cw2->offset;
+
+ if (cw2->region.base != cw2->region.top) {
+ REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part);
+ REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base);
+ REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0,
+ DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top,
+ DMCUB_REGION3_CW2_ENABLE, 1);
+ } else {
+ REG_WRITE(DMCUB_REGION3_CW2_OFFSET, 0);
+ REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, 0);
+ REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, 0);
+ REG_WRITE(DMCUB_REGION3_CW2_TOP_ADDRESS, 0);
+ }
+
+ offset = cw3->offset;
+
+ REG_WRITE(DMCUB_REGION3_CW3_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, offset.u.high_part);
+ REG_WRITE(DMCUB_REGION3_CW3_BASE_ADDRESS, cw3->region.base);
+ REG_SET_2(DMCUB_REGION3_CW3_TOP_ADDRESS, 0,
+ DMCUB_REGION3_CW3_TOP_ADDRESS, cw3->region.top,
+ DMCUB_REGION3_CW3_ENABLE, 1);
+
+ offset = cw4->offset;
+
+ REG_WRITE(DMCUB_REGION4_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION4_OFFSET_HIGH, offset.u.high_part);
+ REG_SET_2(DMCUB_REGION4_TOP_ADDRESS, 0, DMCUB_REGION4_TOP_ADDRESS,
+ cw4->region.top - cw4->region.base - 1, DMCUB_REGION4_ENABLE,
+ 1);
+
+ offset = cw5->offset;
+
+ REG_WRITE(DMCUB_REGION3_CW5_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, offset.u.high_part);
+ REG_WRITE(DMCUB_REGION3_CW5_BASE_ADDRESS, cw5->region.base);
+ REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0,
+ DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top,
+ DMCUB_REGION3_CW5_ENABLE, 1);
+
+ offset = cw6->offset;
+
+ REG_WRITE(DMCUB_REGION3_CW6_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, offset.u.high_part);
+ REG_WRITE(DMCUB_REGION3_CW6_BASE_ADDRESS, cw6->region.base);
+ REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0,
+ DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top,
+ DMCUB_REGION3_CW6_ENABLE, 1);
+}
+
+bool dmub_dcn30_is_auto_load_done(struct dmub_srv *dmub)
+{
+ return (REG_READ(DMCUB_SCRATCH0) > 0);
+}
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_meta.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h
index 242ec257998c..4d8f52b8f12c 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_meta.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2019 Advanced Micro Devices, Inc.
+ * Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,42 +22,29 @@
* Authors: AMD
*
*/
-#ifndef _DMUB_META_H_
-#define _DMUB_META_H_
-#include "dmub_types.h"
+#ifndef _DMUB_DCN30_H_
+#define _DMUB_DCN30_H_
-#pragma pack(push, 1)
+#include "dmub_dcn20.h"
-/* Magic value for identifying dmub_fw_meta_info */
-#define DMUB_FW_META_MAGIC 0x444D5542
+/* Registers. */
-/* Offset from the end of the file to the dmub_fw_meta_info */
-#define DMUB_FW_META_OFFSET 0x24
+extern const struct dmub_srv_common_regs dmub_srv_dcn30_regs;
-/**
- * struct dmub_fw_meta_info - metadata associated with fw binary
- *
- * NOTE: This should be considered a stable API. Fields should
- * not be repurposed or reordered. New fields should be
- * added instead to extend the structure.
- *
- * @magic_value: magic value identifying DMUB firmware meta info
- * @fw_region_size: size of the firmware state region
- * @trace_buffer_size: size of the tracebuffer region
- */
-struct dmub_fw_meta_info {
- uint32_t magic_value;
- uint32_t fw_region_size;
- uint32_t trace_buffer_size;
-};
+/* Hardware functions. */
+
+void dmub_dcn30_backdoor_load(struct dmub_srv *dmub,
+ const struct dmub_window *cw0,
+ const struct dmub_window *cw1);
-/* Ensure that the structure remains 64 bytes. */
-union dmub_fw_meta {
- struct dmub_fw_meta_info info;
- uint8_t reserved[64];
-};
+void dmub_dcn30_setup_windows(struct dmub_srv *dmub,
+ const struct dmub_window *cw2,
+ const struct dmub_window *cw3,
+ const struct dmub_window *cw4,
+ const struct dmub_window *cw5,
+ const struct dmub_window *cw6);
-#pragma pack(pop)
+bool dmub_dcn30_is_auto_load_done(struct dmub_srv *dmub);
-#endif /* _DMUB_META_H_ */
+#endif /* _DMUB_DCN30_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h
index c1f4030929a4..96603d07c23d 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h
@@ -26,7 +26,7 @@
#ifndef _DMUB_REG_H_
#define _DMUB_REG_H_
-#include "../inc/dmub_types.h"
+#include "../inc/dmub_cmd.h"
struct dmub_srv;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 0e3751d94cb0..aa41dfa23020 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -26,7 +26,10 @@
#include "../dmub_srv.h"
#include "dmub_dcn20.h"
#include "dmub_dcn21.h"
-#include "dmub_fw_meta.h"
+#include "dmub_cmd.h"
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+#include "dmub_dcn30.h"
+#endif
#include "os_types.h"
/*
* Note: the DMUB service is standalone. No additional headers should be
@@ -47,10 +50,10 @@
#define DMUB_MAILBOX_SIZE (DMUB_RB_SIZE)
/* Default state size if meta is absent. */
-#define DMUB_FW_STATE_SIZE (1024)
+#define DMUB_FW_STATE_SIZE (64 * 1024)
/* Default tracebuffer size if meta is absent. */
-#define DMUB_TRACE_BUFFER_SIZE (1024)
+#define DMUB_TRACE_BUFFER_SIZE (64 * 1024)
/* Default scratch mem size. */
#define DMUB_SCRATCH_MEM_SIZE (256)
@@ -62,6 +65,7 @@
#define DMUB_CW0_BASE (0x60000000)
#define DMUB_CW1_BASE (0x61000000)
#define DMUB_CW3_BASE (0x63000000)
+#define DMUB_CW4_BASE (0x64000000)
#define DMUB_CW5_BASE (0x65000000)
#define DMUB_CW6_BASE (0x66000000)
@@ -98,12 +102,12 @@ dmub_get_fw_meta_info(const struct dmub_srv_region_params *params)
uint32_t blob_size = 0;
uint32_t meta_offset = 0;
- if (params->fw_bss_data) {
+ if (params->fw_bss_data && params->bss_data_size) {
/* Legacy metadata region. */
blob = params->fw_bss_data;
blob_size = params->bss_data_size;
meta_offset = DMUB_FW_META_OFFSET;
- } else if (params->fw_inst_const) {
+ } else if (params->fw_inst_const && params->inst_const_size) {
/* Combined metadata region. */
blob = params->fw_inst_const;
blob_size = params->inst_const_size;
@@ -132,6 +136,9 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
switch (asic) {
case DMUB_ASIC_DCN20:
case DMUB_ASIC_DCN21:
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ case DMUB_ASIC_DCN30:
+#endif
dmub->regs = &dmub_srv_dcn20_regs;
funcs->reset = dmub_dcn20_reset;
@@ -146,13 +153,20 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->set_gpint = dmub_dcn20_set_gpint;
funcs->is_gpint_acked = dmub_dcn20_is_gpint_acked;
funcs->get_gpint_response = dmub_dcn20_get_gpint_response;
+ funcs->get_fw_status = dmub_dcn20_get_fw_boot_status;
+ funcs->enable_dmub_boot_options = dmub_dcn20_enable_dmub_boot_options;
- if (asic == DMUB_ASIC_DCN21) {
+ if (asic == DMUB_ASIC_DCN21)
dmub->regs = &dmub_srv_dcn21_regs;
- funcs->is_auto_load_done = dmub_dcn21_is_auto_load_done;
- funcs->is_phy_init = dmub_dcn21_is_phy_init;
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ if (asic == DMUB_ASIC_DCN30) {
+ dmub->regs = &dmub_srv_dcn30_regs;
+
+ funcs->backdoor_load = dmub_dcn30_backdoor_load;
+ funcs->setup_windows = dmub_dcn30_setup_windows;
}
+#endif
break;
default:
@@ -172,6 +186,7 @@ enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
dmub->funcs = params->funcs;
dmub->user_ctx = params->user_ctx;
dmub->asic = params->asic;
+ dmub->fw_version = params->fw_version;
dmub->is_virtual = params->is_virtual;
/* Setup asic dependent hardware funcs. */
@@ -182,13 +197,13 @@ enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
/* Override (some) hardware funcs based on user params. */
if (params->hw_funcs) {
- if (params->hw_funcs->get_inbox1_rptr)
- dmub->hw_funcs.get_inbox1_rptr =
- params->hw_funcs->get_inbox1_rptr;
+ if (params->hw_funcs->emul_get_inbox1_rptr)
+ dmub->hw_funcs.emul_get_inbox1_rptr =
+ params->hw_funcs->emul_get_inbox1_rptr;
- if (params->hw_funcs->set_inbox1_wptr)
- dmub->hw_funcs.set_inbox1_wptr =
- params->hw_funcs->set_inbox1_wptr;
+ if (params->hw_funcs->emul_set_inbox1_wptr)
+ dmub->hw_funcs.emul_set_inbox1_wptr =
+ params->hw_funcs->emul_set_inbox1_wptr;
if (params->hw_funcs->is_supported)
dmub->hw_funcs.is_supported =
@@ -266,6 +281,16 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
if (fw_info) {
fw_state_size = fw_info->fw_region_size;
trace_buffer_size = fw_info->trace_buffer_size;
+
+ /**
+ * If DM didn't fill in a version, then fill it in based on
+ * the firmware meta now that we have it.
+ *
+ * TODO: Make it easier for driver to extract this out to
+ * pass during creation.
+ */
+ if (dmub->fw_version == 0)
+ dmub->fw_version = fw_info->fw_version;
}
trace_buff->base = dmub_align(mail->top, 256);
@@ -402,7 +427,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
cw3.region.top = cw3.region.base + bios_fb->size;
cw4.offset.quad_part = mail_fb->gpu_addr;
- cw4.region.base = cw3.region.top + 1;
+ cw4.region.base = DMUB_CW4_BASE;
cw4.region.top = cw4.region.base + mail_fb->size;
inbox1.base = cw4.region.base;
@@ -437,6 +462,10 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub_rb_init(&dmub->inbox1_rb, &rb_params);
}
+ /* Report to DMUB which features the current driver supports */
+ if (dmub->hw_funcs.enable_dmub_boot_options)
+ dmub->hw_funcs.enable_dmub_boot_options(dmub);
+
if (dmub->hw_funcs.reset_release)
dmub->hw_funcs.reset_release(dmub);
@@ -485,7 +514,7 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
*/
dmub_rb_flush_pending(&dmub->inbox1_rb);
- dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
+ dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
return DMUB_STATUS_OK;
}
@@ -497,35 +526,13 @@ enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
- if (!dmub->hw_funcs.is_auto_load_done)
- return DMUB_STATUS_OK;
-
for (i = 0; i <= timeout_us; i += 100) {
- if (dmub->hw_funcs.is_auto_load_done(dmub))
- return DMUB_STATUS_OK;
-
- udelay(100);
- }
+ union dmub_fw_boot_status status = dmub->hw_funcs.get_fw_status(dmub);
- return DMUB_STATUS_TIMEOUT;
-}
-
-enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
- uint32_t timeout_us)
-{
- uint32_t i = 0;
-
- if (!dmub->hw_init)
- return DMUB_STATUS_INVALID;
-
- if (!dmub->hw_funcs.is_phy_init)
- return DMUB_STATUS_OK;
-
- for (i = 0; i <= timeout_us; i += 10) {
- if (dmub->hw_funcs.is_phy_init(dmub))
+ if (status.bits.dal_fw && status.bits.mailbox_rdy)
return DMUB_STATUS_OK;
- udelay(10);
+ udelay(100);
}
return DMUB_STATUS_TIMEOUT;
@@ -540,7 +547,7 @@ enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
return DMUB_STATUS_INVALID;
for (i = 0; i <= timeout_us; ++i) {
- dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+ dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
if (dmub_rb_empty(&dmub->inbox1_rb))
return DMUB_STATUS_OK;
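
The reworked wait above folds the old auto-load and PHY-init polls into a single check on the firmware boot status word. A minimal caller-side sketch, assuming kernel DM context; the function name and the timeout value are illustrative, not taken from this patch:

/*
 * Hedged sketch: wait for DMUB firmware readiness after hardware init.
 * The 100 ms timeout is an assumed value.
 */
static bool example_wait_for_dmub_fw(struct dmub_srv *dmub)
{
	enum dmub_status status;

	/* Internally polls hw_funcs.get_fw_status() for dal_fw + mailbox_rdy. */
	status = dmub_srv_wait_for_auto_load(dmub, 100 * 1000);

	return status == DMUB_STATUS_OK;
}
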
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 2359e88d6029..abeb58d544b1 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -166,6 +166,7 @@ enum {
NV_NAVI10_P_A0 = 1,
NV_NAVI12_P_A0 = 10,
NV_NAVI14_M_A0 = 20,
+ NV_SIENNA_CICHLID_P_A0 = 40,
NV_UNKNOWN = 0xFF
};
@@ -173,6 +174,9 @@ enum {
#define ASICREV_IS_NAVI12_P(eChipRev) ((eChipRev >= NV_NAVI12_P_A0) && (eChipRev < NV_NAVI14_M_A0))
#define ASICREV_IS_NAVI14_M(eChipRev) ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN))
#define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < RAVEN1_F0))
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define ASICREV_IS_SIENNA_CICHLID_P(eChipRev) ((eChipRev >= NV_SIENNA_CICHLID_P_A0))
+#endif
/*
* ASIC chip ID
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
index 0b6859189ca7..b67c9fa6b9cd 100644
--- a/drivers/gpu/drm/amd/display/include/dal_types.h
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -48,6 +48,7 @@ enum dce_version {
DCN_VERSION_1_01,
DCN_VERSION_2_0,
DCN_VERSION_2_1,
+ DCN_VERSION_3_0,
DCN_VERSION_MAX
};
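
The new revision constant and the DCN_VERSION_3_0 entry are normally consumed together when resolving a chip revision to a display IP version. A hedged sketch of that mapping; the resolver name is hypothetical, and FAMILY_NV plus DCE_VERSION_UNKNOWN are assumed to come from the existing dal headers:

/* Hypothetical resolver mapping a Navi-family revision to a dce_version. */
static enum dce_version example_resolve_dcn_version(uint32_t family,
						    uint32_t chip_rev)
{
	enum dce_version ver = DCE_VERSION_UNKNOWN; /* assumed enum member */

	if (family == FAMILY_NV) { /* FAMILY_NV assumed from dal_asic_id.h */
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
		if (ASICREV_IS_SIENNA_CICHLID_P(chip_rev))
			return DCN_VERSION_3_0;
#endif
		ver = DCN_VERSION_2_0;
	}

	return ver;
}
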
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
index d51de94e4bc3..7a06e3914c00 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -183,6 +183,11 @@ struct dc_firmware_info {
};
+struct dc_vram_info {
+ unsigned int num_chans;
+ unsigned int dram_channel_width_bytes;
+};
+
struct step_and_delay_info {
uint32_t step;
uint32_t delay;
diff --git a/drivers/gpu/drm/amd/display/modules/color/Makefile b/drivers/gpu/drm/amd/display/modules/color/Makefile
index 65c33a76951a..e66c19a840c2 100644
--- a/drivers/gpu/drm/amd/display/modules/color/Makefile
+++ b/drivers/gpu/drm/amd/display/modules/color/Makefile
@@ -23,7 +23,7 @@
# Makefile for the color sub-module of DAL.
#
-MOD_COLOR = color_gamma.o
+MOD_COLOR = color_gamma.o color_table.o
AMD_DAL_MOD_COLOR = $(addprefix $(AMDDALPATH)/modules/color/,$(MOD_COLOR))
#$(info ************ DAL COLOR MODULE MAKEFILE ************)
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index bcfe34ef8c28..b8695660b480 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -30,20 +30,10 @@
#include "opp.h"
#include "color_gamma.h"
-#define NUM_PTS_IN_REGION 16
-#define NUM_REGIONS 32
-#define MAX_HW_POINTS (NUM_PTS_IN_REGION*NUM_REGIONS)
-
static struct hw_x_point coordinates_x[MAX_HW_POINTS + 2];
-static struct fixed31_32 pq_table[MAX_HW_POINTS + 2];
-static struct fixed31_32 de_pq_table[MAX_HW_POINTS + 2];
-
// these are helpers for calculations to reduce stack usage
// do not depend on these being preserved across calls
-static struct fixed31_32 scratch_1;
-static struct fixed31_32 scratch_2;
-static struct translate_from_linear_space_args scratch_gamma_args;
/* Helper to optimize gamma calculation, only use in translate_from_linear, in
* particular the dc_fixpt_pow function which is very expensive
@@ -56,9 +46,6 @@ static struct translate_from_linear_space_args scratch_gamma_args;
* just multiply with 2^gamma which can be computed once, and save the result so we
* recursively compute all the values.
*/
-static struct fixed31_32 pow_buffer[NUM_PTS_IN_REGION];
-static struct fixed31_32 gamma_of_2; // 2^gamma
-int pow_buffer_ptr = -1;
/*sRGB 709 2.2 2.4 P3*/
static const int32_t gamma_numerator01[] = { 31308, 180000, 0, 0, 0};
static const int32_t gamma_numerator02[] = { 12920, 4500, 0, 0, 0};
@@ -66,9 +53,6 @@ static const int32_t gamma_numerator03[] = { 55, 99, 0, 0, 0};
static const int32_t gamma_numerator04[] = { 55, 99, 0, 0, 0};
static const int32_t gamma_numerator05[] = { 2400, 2200, 2200, 2400, 2600};
-static bool pq_initialized; /* = false; */
-static bool de_pq_initialized; /* = false; */
-
/* one-time setup of X points */
void setup_x_points_distribution(void)
{
@@ -250,6 +234,8 @@ void precompute_pq(void)
struct fixed31_32 scaling_factor =
dc_fixpt_from_fraction(80, 10000);
+ struct fixed31_32 *pq_table = mod_color_get_table(type_pq_table);
+
/* pow function has problems with arguments too small */
for (i = 0; i < 32; i++)
pq_table[i] = dc_fixpt_zero;
@@ -269,7 +255,7 @@ void precompute_de_pq(void)
uint32_t begin_index, end_index;
struct fixed31_32 scaling_factor = dc_fixpt_from_int(125);
-
+ struct fixed31_32 *de_pq_table = mod_color_get_table(type_de_pq_table);
/* X points is 2^-25 to 2^7
* De-gamma X is 2^-12 to 2^0 – we are skipping first -12-(-25) = 13 regions
*/
@@ -339,6 +325,9 @@ static struct fixed31_32 translate_from_linear_space(
{
const struct fixed31_32 one = dc_fixpt_from_int(1);
+ struct fixed31_32 scratch_1, scratch_2;
+ struct calculate_buffer *cal_buffer = args->cal_buffer;
+
if (dc_fixpt_le(one, args->arg))
return one;
@@ -352,21 +341,21 @@ static struct fixed31_32 translate_from_linear_space(
return scratch_1;
} else if (dc_fixpt_le(args->a0, args->arg)) {
- if (pow_buffer_ptr == 0) {
- gamma_of_2 = dc_fixpt_pow(dc_fixpt_from_int(2),
+ if (cal_buffer->buffer_index == 0) {
+ cal_buffer->gamma_of_2 = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_recip(args->gamma));
}
scratch_1 = dc_fixpt_add(one, args->a3);
- if (pow_buffer_ptr < 16)
+ if (cal_buffer->buffer_index < 16)
scratch_2 = dc_fixpt_pow(args->arg,
dc_fixpt_recip(args->gamma));
else
- scratch_2 = dc_fixpt_mul(gamma_of_2,
- pow_buffer[pow_buffer_ptr%16]);
+ scratch_2 = dc_fixpt_mul(cal_buffer->gamma_of_2,
+ cal_buffer->buffer[cal_buffer->buffer_index%16]);
- if (pow_buffer_ptr != -1) {
- pow_buffer[pow_buffer_ptr%16] = scratch_2;
- pow_buffer_ptr++;
+ if (cal_buffer->buffer_index != -1) {
+ cal_buffer->buffer[cal_buffer->buffer_index%16] = scratch_2;
+ cal_buffer->buffer_index++;
}
scratch_1 = dc_fixpt_mul(scratch_1, scratch_2);
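
The hunk above is the core of the pow() reuse optimization: hardware X points double every NUM_PTS_IN_REGION entries, so pow(2*x, 1/g) equals pow(2, 1/g) * pow(x, 1/g), and only the first 16 points of a curve need a full pow() evaluation. A minimal sketch of the same idea in plain double precision (the real code uses dc_fixpt fixed point and the per-call calculate_buffer):

#include <math.h>

#define PTS_PER_REGION 16 /* mirrors NUM_PTS_IN_REGION */

/*
 * Returns pow(x, inv_gamma), reusing the value computed 16 points earlier
 * (one region below, i.e. x/2) whenever possible.  pow2_inv_gamma must be
 * pow(2.0, inv_gamma), computed once by the caller.
 */
static double cached_pow(double x, double inv_gamma, double pow2_inv_gamma,
			 double ring[PTS_PER_REGION], int *idx)
{
	double r;

	if (*idx < PTS_PER_REGION)
		r = pow(x, inv_gamma);                            /* expensive path */
	else
		r = pow2_inv_gamma * ring[*idx % PTS_PER_REGION]; /* cheap reuse */

	ring[*idx % PTS_PER_REGION] = r;
	(*idx)++;
	return r;
}
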
@@ -413,15 +402,17 @@ static struct fixed31_32 translate_from_linear_space_long(
args->a1);
}
-static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg, bool use_eetf)
+static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg, bool use_eetf, struct calculate_buffer *cal_buffer)
{
struct fixed31_32 gamma = dc_fixpt_from_fraction(22, 10);
+ struct translate_from_linear_space_args scratch_gamma_args;
scratch_gamma_args.arg = arg;
scratch_gamma_args.a0 = dc_fixpt_zero;
scratch_gamma_args.a1 = dc_fixpt_zero;
scratch_gamma_args.a2 = dc_fixpt_zero;
scratch_gamma_args.a3 = dc_fixpt_zero;
+ scratch_gamma_args.cal_buffer = cal_buffer;
scratch_gamma_args.gamma = gamma;
if (use_eetf)
@@ -467,14 +458,18 @@ static struct fixed31_32 translate_to_linear_space(
static struct fixed31_32 translate_from_linear_space_ex(
struct fixed31_32 arg,
struct gamma_coefficients *coeff,
- uint32_t color_index)
+ uint32_t color_index,
+ struct calculate_buffer *cal_buffer)
{
+ struct translate_from_linear_space_args scratch_gamma_args;
+
scratch_gamma_args.arg = arg;
scratch_gamma_args.a0 = coeff->a0[color_index];
scratch_gamma_args.a1 = coeff->a1[color_index];
scratch_gamma_args.a2 = coeff->a2[color_index];
scratch_gamma_args.a3 = coeff->a3[color_index];
scratch_gamma_args.gamma = coeff->user_gamma[color_index];
+ scratch_gamma_args.cal_buffer = cal_buffer;
return translate_from_linear_space(&scratch_gamma_args);
}
@@ -742,10 +737,11 @@ static void build_pq(struct pwl_float_data_ex *rgb_regamma,
struct fixed31_32 output;
struct fixed31_32 scaling_factor =
dc_fixpt_from_fraction(sdr_white_level, 10000);
+ struct fixed31_32 *pq_table = mod_color_get_table(type_pq_table);
- if (!pq_initialized && sdr_white_level == 80) {
+ if (!mod_color_is_table_init(type_pq_table) && sdr_white_level == 80) {
precompute_pq();
- pq_initialized = true;
+ mod_color_set_table_init_state(type_pq_table, true);
}
/* TODO: start index is from segment 2^-24, skipping first segment
@@ -787,12 +783,12 @@ static void build_de_pq(struct pwl_float_data_ex *de_pq,
{
uint32_t i;
struct fixed31_32 output;
-
+ struct fixed31_32 *de_pq_table = mod_color_get_table(type_de_pq_table);
struct fixed31_32 scaling_factor = dc_fixpt_from_int(125);
- if (!de_pq_initialized) {
+ if (!mod_color_is_table_init(type_de_pq_table)) {
precompute_de_pq();
- de_pq_initialized = true;
+ mod_color_set_table_init_state(type_de_pq_table, true);
}
@@ -811,7 +807,9 @@ static void build_de_pq(struct pwl_float_data_ex *de_pq,
static bool build_regamma(struct pwl_float_data_ex *rgb_regamma,
uint32_t hw_points_num,
- const struct hw_x_point *coordinate_x, enum dc_transfer_func_predefined type)
+ const struct hw_x_point *coordinate_x,
+ enum dc_transfer_func_predefined type,
+ struct calculate_buffer *cal_buffer)
{
uint32_t i;
bool ret = false;
@@ -827,20 +825,21 @@ static bool build_regamma(struct pwl_float_data_ex *rgb_regamma,
if (!build_coefficients(coeff, type))
goto release;
- memset(pow_buffer, 0, NUM_PTS_IN_REGION * sizeof(struct fixed31_32));
- pow_buffer_ptr = 0; // see variable definition for more info
+ memset(cal_buffer->buffer, 0, NUM_PTS_IN_REGION * sizeof(struct fixed31_32));
+ cal_buffer->buffer_index = 0; // see variable definition for more info
+
i = 0;
while (i <= hw_points_num) {
/*TODO use y vs r,g,b*/
rgb->r = translate_from_linear_space_ex(
- coord_x->x, coeff, 0);
+ coord_x->x, coeff, 0, cal_buffer);
rgb->g = rgb->r;
rgb->b = rgb->r;
++coord_x;
++rgb;
++i;
}
- pow_buffer_ptr = -1; // reset back to no optimize
+ cal_buffer->buffer_index = -1;
ret = true;
release:
kvfree(coeff);
@@ -932,7 +931,8 @@ static void hermite_spline_eetf(struct fixed31_32 input_x,
static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
uint32_t hw_points_num,
const struct hw_x_point *coordinate_x,
- const struct freesync_hdr_tf_params *fs_params)
+ const struct freesync_hdr_tf_params *fs_params,
+ struct calculate_buffer *cal_buffer)
{
uint32_t i;
struct pwl_float_data_ex *rgb = rgb_regamma;
@@ -969,7 +969,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
max_content = max_display;
if (!use_eetf)
- pow_buffer_ptr = 0; // see var definition for more info
+ cal_buffer->buffer_index = 0; // see var definition for more info
rgb += 32; // first 32 points have problems with fixed point, too small
coord_x += 32;
for (i = 32; i <= hw_points_num; i++) {
@@ -988,7 +988,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
if (dc_fixpt_lt(scaledX, dc_fixpt_zero))
output = dc_fixpt_zero;
else
- output = calculate_gamma22(scaledX, use_eetf);
+ output = calculate_gamma22(scaledX, use_eetf, cal_buffer);
rgb->r = output;
rgb->g = output;
@@ -1008,7 +1008,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
++coord_x;
++rgb;
}
- pow_buffer_ptr = -1;
+ cal_buffer->buffer_index = -1;
return true;
}
@@ -1606,7 +1606,7 @@ static void build_new_custom_resulted_curve(
}
static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma,
- uint32_t hw_points_num)
+ uint32_t hw_points_num, struct calculate_buffer *cal_buffer)
{
uint32_t i;
@@ -1619,7 +1619,7 @@ static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma
i = 0;
while (i != hw_points_num + 1) {
rgb->r = translate_from_linear_space_ex(
- coord_x->x, &coeff, 0);
+ coord_x->x, &coeff, 0, cal_buffer);
rgb->g = rgb->r;
rgb->b = rgb->r;
++coord_x;
@@ -1674,7 +1674,8 @@ static bool map_regamma_hw_to_x_user(
#define _EXTRA_POINTS 3
bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
- const struct regamma_lut *regamma)
+ const struct regamma_lut *regamma,
+ struct calculate_buffer *cal_buffer)
{
struct gamma_coefficients coeff;
const struct hw_x_point *coord_x = coordinates_x;
@@ -1706,11 +1707,11 @@ bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
}
while (i != MAX_HW_POINTS + 1) {
output_tf->tf_pts.red[i] = translate_from_linear_space_ex(
- coord_x->x, &coeff, 0);
+ coord_x->x, &coeff, 0, cal_buffer);
output_tf->tf_pts.green[i] = translate_from_linear_space_ex(
- coord_x->x, &coeff, 1);
+ coord_x->x, &coeff, 1, cal_buffer);
output_tf->tf_pts.blue[i] = translate_from_linear_space_ex(
- coord_x->x, &coeff, 2);
+ coord_x->x, &coeff, 2, cal_buffer);
++coord_x;
++i;
}
@@ -1723,7 +1724,8 @@ bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
}
bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
- const struct regamma_lut *regamma)
+ const struct regamma_lut *regamma,
+ struct calculate_buffer *cal_buffer)
{
struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
struct dividers dividers;
@@ -1756,7 +1758,7 @@ bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
scale_user_regamma_ramp(rgb_user, &regamma->ramp, dividers);
if (regamma->flags.bits.applyDegamma == 1) {
- apply_degamma_for_user_regamma(rgb_regamma, MAX_HW_POINTS);
+ apply_degamma_for_user_regamma(rgb_regamma, MAX_HW_POINTS, cal_buffer);
copy_rgb_regamma_to_coordinates_x(coordinates_x,
MAX_HW_POINTS, rgb_regamma);
}
@@ -1943,7 +1945,8 @@ static bool calculate_curve(enum dc_transfer_func_predefined trans,
struct dc_transfer_func_distributed_points *points,
struct pwl_float_data_ex *rgb_regamma,
const struct freesync_hdr_tf_params *fs_params,
- uint32_t sdr_ref_white_level)
+ uint32_t sdr_ref_white_level,
+ struct calculate_buffer *cal_buffer)
{
uint32_t i;
bool ret = false;
@@ -1979,7 +1982,8 @@ static bool calculate_curve(enum dc_transfer_func_predefined trans,
build_freesync_hdr(rgb_regamma,
MAX_HW_POINTS,
coordinates_x,
- fs_params);
+ fs_params,
+ cal_buffer);
ret = true;
} else if (trans == TRANSFER_FUNCTION_HLG) {
@@ -2008,7 +2012,8 @@ static bool calculate_curve(enum dc_transfer_func_predefined trans,
build_regamma(rgb_regamma,
MAX_HW_POINTS,
coordinates_x,
- trans);
+ trans,
+ cal_buffer);
ret = true;
}
@@ -2018,7 +2023,8 @@ static bool calculate_curve(enum dc_transfer_func_predefined trans,
bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
- const struct freesync_hdr_tf_params *fs_params)
+ const struct freesync_hdr_tf_params *fs_params,
+ struct calculate_buffer *cal_buffer)
{
struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
struct dividers dividers;
@@ -2090,7 +2096,8 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
tf_pts,
rgb_regamma,
fs_params,
- output_tf->sdr_ref_white_level);
+ output_tf->sdr_ref_white_level,
+ cal_buffer);
if (ret) {
map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
index 7f56226ba77a..37ffbef6602b 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
@@ -26,6 +26,8 @@
#ifndef COLOR_MOD_COLOR_GAMMA_H_
#define COLOR_MOD_COLOR_GAMMA_H_
+#include "color_table.h"
+
struct dc_transfer_func;
struct dc_gamma;
struct dc_transfer_func_distributed_points;
@@ -83,6 +85,12 @@ struct freesync_hdr_tf_params {
unsigned int skip_tm; // skip tm
};
+struct calculate_buffer {
+ int buffer_index;
+ struct fixed31_32 buffer[NUM_PTS_IN_REGION];
+ struct fixed31_32 gamma_of_2;
+};
+
struct translate_from_linear_space_args {
struct fixed31_32 arg;
struct fixed31_32 a0;
@@ -90,6 +98,7 @@ struct translate_from_linear_space_args {
struct fixed31_32 a2;
struct fixed31_32 a3;
struct fixed31_32 gamma;
+ struct calculate_buffer *cal_buffer;
};
void setup_x_points_distribution(void);
@@ -99,7 +108,8 @@ void precompute_de_pq(void);
bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
- const struct freesync_hdr_tf_params *fs_params);
+ const struct freesync_hdr_tf_params *fs_params,
+ struct calculate_buffer *cal_buffer);
bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
struct dc_transfer_func *output_tf,
@@ -109,10 +119,12 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
struct dc_transfer_func_distributed_points *points);
bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
- const struct regamma_lut *regamma);
+ const struct regamma_lut *regamma,
+ struct calculate_buffer *cal_buffer);
bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
- const struct regamma_lut *regamma);
+ const struct regamma_lut *regamma,
+ struct calculate_buffer *cal_buffer);
#endif /* COLOR_MOD_COLOR_GAMMA_H_ */
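
With the scratch state gone from color_gamma.c, callers now supply their own struct calculate_buffer through the updated prototypes above. A hedged caller-side sketch; the wrapper name, the stack allocation, and the argument values are illustrative assumptions:

static bool example_build_regamma(struct dc_transfer_func *output_tf,
				  const struct dc_gamma *ramp)
{
	/* Real callers may keep this buffer elsewhere; stack use is assumed. */
	struct calculate_buffer cal_buffer = { .buffer_index = -1 };

	return mod_color_calculate_regamma_params(output_tf, ramp,
						  true,  /* mapUserRamp  */
						  false, /* canRomBeUsed */
						  NULL,  /* no FreeSync HDR params */
						  &cal_buffer);
}
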
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_table.c b/drivers/gpu/drm/amd/display/modules/color/color_table.c
new file mode 100644
index 000000000000..692e536e7d05
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/color/color_table.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2019 Advanced Micro Devices, Inc. (unpublished)
+ *
+ * All rights reserved. This notice is intended as a precaution against
+ * inadvertent publication and does not imply publication or any waiver
+ * of confidentiality. The year included in the foregoing notice is the
+ * year of creation of the work.
+ */
+
+#include "color_table.h"
+
+static struct fixed31_32 pq_table[MAX_HW_POINTS + 2];
+static struct fixed31_32 de_pq_table[MAX_HW_POINTS + 2];
+static bool pq_initialized;
+static bool de_pq_initialized;
+
+bool mod_color_is_table_init(enum table_type type)
+{
+ bool ret = false;
+
+ if (type == type_pq_table)
+ ret = pq_initialized;
+ if (type == type_de_pq_table)
+ ret = de_pq_initialized;
+
+ return ret;
+}
+
+struct fixed31_32 *mod_color_get_table(enum table_type type)
+{
+ struct fixed31_32 *table = NULL;
+
+ if (type == type_pq_table)
+ table = pq_table;
+ if (type == type_de_pq_table)
+ table = de_pq_table;
+
+ return table;
+}
+
+void mod_color_set_table_init_state(enum table_type type, bool state)
+{
+ if (type == type_pq_table)
+ pq_initialized = state;
+ if (type == type_de_pq_table)
+ de_pq_initialized = state;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h b/drivers/gpu/drm/amd/display/modules/color/color_table.h
index e42de9ded275..2621dd619402 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
+++ b/drivers/gpu/drm/amd/display/modules/color/color_table.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2019 Advanced Micro Devices, Inc.
+ * Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -23,34 +23,25 @@
*
*/
-#ifndef _DMUB_CMD_DAL_H_
-#define _DMUB_CMD_DAL_H_
-/*
- * Command IDs should be treated as stable ABI.
- * Do not reuse or modify IDs.
- */
+#ifndef COLOR_MOD_COLOR_TABLE_H_
+#define COLOR_MOD_COLOR_TABLE_H_
-enum dmub_cmd_psr_type {
- DMUB_CMD__PSR_SET_VERSION = 0,
- DMUB_CMD__PSR_COPY_SETTINGS = 1,
- DMUB_CMD__PSR_ENABLE = 2,
- DMUB_CMD__PSR_DISABLE = 3,
- DMUB_CMD__PSR_SET_LEVEL = 4,
-};
+#include "dc_types.h"
-enum psr_version {
- PSR_VERSION_1 = 0,
- PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF,
-};
+#define NUM_PTS_IN_REGION 16
+#define NUM_REGIONS 32
+#define MAX_HW_POINTS (NUM_PTS_IN_REGION*NUM_REGIONS)
-enum dmub_cmd_abm_type {
- DMUB_CMD__ABM_INIT_CONFIG = 0,
- DMUB_CMD__ABM_SET_PIPE = 1,
- DMUB_CMD__ABM_SET_BACKLIGHT = 2,
- DMUB_CMD__ABM_SET_LEVEL = 3,
- DMUB_CMD__ABM_SET_AMBIENT_LEVEL = 4,
- DMUB_CMD__ABM_SET_PWM_FRAC = 5,
+enum table_type {
+ type_pq_table,
+ type_de_pq_table
};
-#endif /* _DMUB_CMD_DAL_H_ */
+bool mod_color_is_table_init(enum table_type type);
+
+struct fixed31_32 *mod_color_get_table(enum table_type type);
+
+void mod_color_set_table_init_state(enum table_type type, bool state);
+
+#endif /* COLOR_MOD_COLOR_TABLE_H_ */
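
The accessor trio above replaces the file-scope pq/de_pq statics in color_gamma.c. A short usage sketch mirroring how build_pq() consumes it; purely illustrative, and the wrapper name is hypothetical:

static struct fixed31_32 *example_get_initialized_pq_table(void)
{
	struct fixed31_32 *pq_table = mod_color_get_table(type_pq_table);

	if (!mod_color_is_table_init(type_pq_table)) {
		precompute_pq(); /* fills the shared table; declared in color_gamma.h */
		mod_color_set_table_init_state(type_pq_table, true);
	}

	return pq_table;
}
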
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
index 3812094b52e8..4220fd8fdd60 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
@@ -36,7 +36,13 @@ struct mod_stats_caps {
bool dummy;
};
-struct mod_stats *mod_stats_create(struct dc *dc);
+struct mod_stats_init_params {
+ unsigned int stats_enable;
+ unsigned int stats_entries;
+};
+
+struct mod_stats *mod_stats_create(struct dc *dc,
+ struct mod_stats_init_params *init_params);
void mod_stats_destroy(struct mod_stats *mod_stats);
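
mod_stats_create() now takes an init-parameters struct alongside the dc pointer. A hedged caller sketch; the wrapper name and field values are illustrative defaults, not taken from this patch:

static struct mod_stats *example_create_stats(struct dc *dc)
{
	struct mod_stats_init_params init_params = {
		.stats_enable  = 1,     /* assumed: enable collection */
		.stats_entries = 0x400, /* assumed: log depth */
	};

	return mod_stats_create(dc, &init_params);
}
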
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 8c37bcc27132..859724771a75 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -27,8 +27,11 @@
#include "dc/inc/hw/abm.h"
#include "dc.h"
#include "core_types.h"
+#include "dmub_cmd.h"
#define DIV_ROUNDUP(a, b) (((a)+((b)/2))/(b))
+#define bswap16_based_on_endian(big_endian, value) \
+ ((big_endian) ? cpu_to_be16(value) : cpu_to_le16(value))
/* Possible Min Reduction config from least aggressive to most aggressive
* 0 1 2 3 4 5 6 7 8 9 10 11 12
@@ -350,6 +353,7 @@ void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters
ram_table->bright_pos_gain[4][1] = 0x20;
ram_table->bright_pos_gain[4][2] = 0x20;
ram_table->bright_pos_gain[4][3] = 0x20;
+ ram_table->bright_neg_gain[0][0] = 0x00;
ram_table->bright_neg_gain[0][1] = 0x00;
ram_table->bright_neg_gain[0][2] = 0x00;
ram_table->bright_neg_gain[0][3] = 0x00;
@@ -624,30 +628,30 @@ void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
ram_table->iir_curve[4] = 0x65;
//Gamma 2.2
- ram_table->crgb_thresh[0] = (big_endian) ? cpu_to_be16(0x127c) : cpu_to_le16(0x127c);
- ram_table->crgb_thresh[1] = (big_endian) ? cpu_to_be16(0x151b) : cpu_to_le16(0x151b);
- ram_table->crgb_thresh[2] = (big_endian) ? cpu_to_be16(0x17d5) : cpu_to_le16(0x17d5);
- ram_table->crgb_thresh[3] = (big_endian) ? cpu_to_be16(0x1a56) : cpu_to_le16(0x1a56);
- ram_table->crgb_thresh[4] = (big_endian) ? cpu_to_be16(0x1c83) : cpu_to_le16(0x1c83);
- ram_table->crgb_thresh[5] = (big_endian) ? cpu_to_be16(0x1e72) : cpu_to_le16(0x1e72);
- ram_table->crgb_thresh[6] = (big_endian) ? cpu_to_be16(0x20f0) : cpu_to_le16(0x20f0);
- ram_table->crgb_thresh[7] = (big_endian) ? cpu_to_be16(0x232b) : cpu_to_le16(0x232b);
- ram_table->crgb_offset[0] = (big_endian) ? cpu_to_be16(0x2999) : cpu_to_le16(0x2999);
- ram_table->crgb_offset[1] = (big_endian) ? cpu_to_be16(0x3999) : cpu_to_le16(0x3999);
- ram_table->crgb_offset[2] = (big_endian) ? cpu_to_be16(0x4666) : cpu_to_le16(0x4666);
- ram_table->crgb_offset[3] = (big_endian) ? cpu_to_be16(0x5999) : cpu_to_le16(0x5999);
- ram_table->crgb_offset[4] = (big_endian) ? cpu_to_be16(0x6333) : cpu_to_le16(0x6333);
- ram_table->crgb_offset[5] = (big_endian) ? cpu_to_be16(0x7800) : cpu_to_le16(0x7800);
- ram_table->crgb_offset[6] = (big_endian) ? cpu_to_be16(0x8c00) : cpu_to_le16(0x8c00);
- ram_table->crgb_offset[7] = (big_endian) ? cpu_to_be16(0xa000) : cpu_to_le16(0xa000);
- ram_table->crgb_slope[0] = (big_endian) ? cpu_to_be16(0x3609) : cpu_to_le16(0x3609);
- ram_table->crgb_slope[1] = (big_endian) ? cpu_to_be16(0x2dfa) : cpu_to_le16(0x2dfa);
- ram_table->crgb_slope[2] = (big_endian) ? cpu_to_be16(0x27ea) : cpu_to_le16(0x27ea);
- ram_table->crgb_slope[3] = (big_endian) ? cpu_to_be16(0x235d) : cpu_to_le16(0x235d);
- ram_table->crgb_slope[4] = (big_endian) ? cpu_to_be16(0x2042) : cpu_to_le16(0x2042);
- ram_table->crgb_slope[5] = (big_endian) ? cpu_to_be16(0x1dc3) : cpu_to_le16(0x1dc3);
- ram_table->crgb_slope[6] = (big_endian) ? cpu_to_be16(0x1b1a) : cpu_to_le16(0x1b1a);
- ram_table->crgb_slope[7] = (big_endian) ? cpu_to_be16(0x1910) : cpu_to_le16(0x1910);
+ ram_table->crgb_thresh[0] = bswap16_based_on_endian(big_endian, 0x127c);
+ ram_table->crgb_thresh[1] = bswap16_based_on_endian(big_endian, 0x151b);
+ ram_table->crgb_thresh[2] = bswap16_based_on_endian(big_endian, 0x17d5);
+ ram_table->crgb_thresh[3] = bswap16_based_on_endian(big_endian, 0x1a56);
+ ram_table->crgb_thresh[4] = bswap16_based_on_endian(big_endian, 0x1c83);
+ ram_table->crgb_thresh[5] = bswap16_based_on_endian(big_endian, 0x1e72);
+ ram_table->crgb_thresh[6] = bswap16_based_on_endian(big_endian, 0x20f0);
+ ram_table->crgb_thresh[7] = bswap16_based_on_endian(big_endian, 0x232b);
+ ram_table->crgb_offset[0] = bswap16_based_on_endian(big_endian, 0x2999);
+ ram_table->crgb_offset[1] = bswap16_based_on_endian(big_endian, 0x3999);
+ ram_table->crgb_offset[2] = bswap16_based_on_endian(big_endian, 0x4666);
+ ram_table->crgb_offset[3] = bswap16_based_on_endian(big_endian, 0x5999);
+ ram_table->crgb_offset[4] = bswap16_based_on_endian(big_endian, 0x6333);
+ ram_table->crgb_offset[5] = bswap16_based_on_endian(big_endian, 0x7800);
+ ram_table->crgb_offset[6] = bswap16_based_on_endian(big_endian, 0x8c00);
+ ram_table->crgb_offset[7] = bswap16_based_on_endian(big_endian, 0xa000);
+ ram_table->crgb_slope[0] = bswap16_based_on_endian(big_endian, 0x3609);
+ ram_table->crgb_slope[1] = bswap16_based_on_endian(big_endian, 0x2dfa);
+ ram_table->crgb_slope[2] = bswap16_based_on_endian(big_endian, 0x27ea);
+ ram_table->crgb_slope[3] = bswap16_based_on_endian(big_endian, 0x235d);
+ ram_table->crgb_slope[4] = bswap16_based_on_endian(big_endian, 0x2042);
+ ram_table->crgb_slope[5] = bswap16_based_on_endian(big_endian, 0x1dc3);
+ ram_table->crgb_slope[6] = bswap16_based_on_endian(big_endian, 0x1b1a);
+ ram_table->crgb_slope[7] = bswap16_based_on_endian(big_endian, 0x1910);
fill_backlight_transform_table_v_2_2(
params, ram_table, big_endian);
@@ -656,17 +660,55 @@ void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
bool dmub_init_abm_config(struct abm *abm,
struct dmcu_iram_parameters params)
{
- unsigned char ram_table[IRAM_SIZE];
+ struct iram_table_v_2_2 ram_table;
+ struct abm_config_table config;
bool result = false;
+ uint32_t i, j = 0;
if (abm == NULL)
return false;
memset(&ram_table, 0, sizeof(ram_table));
+ memset(&config, 0, sizeof(config));
+
+ fill_iram_v_2_3(&ram_table, params, false);
+
+ // We must copy into a structure that is 32-bit aligned
+ for (i = 0; i < NUM_POWER_FN_SEGS; i++) {
+ config.crgb_thresh[i] = ram_table.crgb_thresh[i];
+ config.crgb_offset[i] = ram_table.crgb_offset[i];
+ config.crgb_slope[i] = ram_table.crgb_slope[i];
+ }
+
+ for (i = 0; i < NUM_BL_CURVE_SEGS; i++) {
+ config.backlight_thresholds[i] = ram_table.backlight_thresholds[i];
+ config.backlight_offsets[i] = ram_table.backlight_offsets[i];
+ }
+
+ for (i = 0; i < NUM_AMBI_LEVEL; i++)
+ config.iir_curve[i] = ram_table.iir_curve[i];
+
+ for (i = 0; i < NUM_AMBI_LEVEL; i++) {
+ for (j = 0; j < NUM_AGGR_LEVEL; j++) {
+ config.min_reduction[i][j] = ram_table.min_reduction[i][j];
+ config.max_reduction[i][j] = ram_table.max_reduction[i][j];
+ config.bright_pos_gain[i][j] = ram_table.bright_pos_gain[i][j];
+ config.dark_pos_gain[i][j] = ram_table.dark_pos_gain[i][j];
+ }
+ }
+
+ for (i = 0; i < NUM_AGGR_LEVEL; i++) {
+ config.hybrid_factor[i] = ram_table.hybrid_factor[i];
+ config.contrast_factor[i] = ram_table.contrast_factor[i];
+ config.deviation_gain[i] = ram_table.deviation_gain[i];
+ config.min_knee[i] = ram_table.min_knee[i];
+ config.max_knee[i] = ram_table.max_knee[i];
+ }
+
+ config.min_abm_backlight = ram_table.min_abm_backlight;
- fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params, false);
result = abm->funcs->init_abm_config(
- abm, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
+ abm, (char *)(&config), sizeof(struct abm_config_table));
return result;
}