author    Dave Airlie <airlied@redhat.com>    2020-07-02 15:17:31 +1000
committer Dave Airlie <airlied@redhat.com>    2020-07-02 15:17:31 +1000
commit    9555152beb1143c85c03f9b9de59863cbbe89f4b (patch)
tree      3d43b98bf373e72fe84562adafe3bcbb45d21054 /drivers/gpu/drm/amd/display
parent    f75020fcb97a54c0d2ade1f4918db82f44d225ad (diff)
parent    7808363154d622f9446bf4db97ff0f041dafa30b (diff)
Merge tag 'amd-drm-next-5.9-2020-07-01' of git://people.freedesktop.org/~agd5f/linux into drm-next
amd-drm-next-5.9-2020-07-01:

amdgpu:
- DC DMUB updates
- HDCP fixes
- Thermal interrupt fixes
- Add initial support for Sienna Cichlid GPU
- Add support for unique id on Arcturus
- Major swSMU code cleanup
- Skip BAR resizing if the bios already did it
- Fixes for DCN bandwidth calculations
- Runtime PM reference count fixes
- Add initial UVD support for SI
- Add support for ASSR on eDP links
- Lots of misc fixes and cleanups
- Enable runtime PM on vega10 boards that support BACO
- RAS fixes
- SR-IOV fixes
- Use IP discovery table on renoir
- DC stream synchronization fixes

amdkfd:
- Track SDMA usage per process
- Fix GCC10 compiler warnings
- Locking fix

radeon:
- Default to on chip GART for AGP boards on all arches
- Runtime PM reference count fixes

UAPI:
- Update comments to clarify MTYPE

From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200701155041.1102829-1-alexander.deucher@amd.com
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/amd/display')
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig8
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c100
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c10
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c8
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c43
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c20
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c19
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c30
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c204
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile10
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h60
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c543
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.h (renamed from drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h)25
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c255
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.h108
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c137
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c59
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c163
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h65
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_bios_types.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h75
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c153
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c92
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c (renamed from drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h)78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h62
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h41
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c134
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c93
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c121
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c89
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/Makefile54
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c206
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h230
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c640
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.h78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.c100
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h66
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c205
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h76
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c851
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h269
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c1414
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h608
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c410
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c264
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h923
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c354
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c417
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h119
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c532
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h292
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c719
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h70
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c141
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c239
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h463
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c1409
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h665
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_opp.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c365
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h333
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c2691
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h82
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c194
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h133
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_cp_psp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h69
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c6865
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c1868
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h69
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c181
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c67
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h230
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/Makefile10
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c257
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.c387
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_status.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h75
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h83
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mcif_wb.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h108
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/Makefile10
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c384
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.h37
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h10
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h27
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h500
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h75
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h152
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c49
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h6
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c10
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h6
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c184
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h (renamed from drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_meta.h)51
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c91
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h4
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h5
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c115
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.h18
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_table.c48
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_table.h (renamed from drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h)43
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_stats.h8
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c96
186 files changed, 30607 insertions, 1355 deletions
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 1911a34cc060..34ae4f3a32f4 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -17,6 +17,14 @@ config DRM_AMD_DC_DCN
help
Raven, Navi and Renoir family support for display engine
+config DRM_AMD_DC_DCN3_0
+ bool "DCN 3.0 family"
+ depends on DRM_AMD_DC && X86
+ depends on DRM_AMD_DC_DCN
+ help
+ Choose this option if you want to have
+ sienna_cichlid support for display engine
+
config DRM_AMD_DC_HDCP
bool "Enable HDCP support in DC"
depends on DRM_AMD_DC
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 7ced9f87be97..9b1f5244ec87 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -94,6 +94,10 @@
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
+#endif
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
@@ -696,7 +700,7 @@ static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
adev->mode_info.audio.enabled = false;
}
-void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
+static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
struct drm_audio_component *acomp = adev->dm.audio_component;
@@ -1070,6 +1074,9 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_RENOIR:
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case CHIP_SIENNA_CICHLID:
+#endif
return 0;
case CHIP_NAVI12:
fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
@@ -1166,6 +1173,12 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
dmub_asic = DMUB_ASIC_DCN21;
fw_name_dmub = FIRMWARE_RENOIR_DMUB;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case CHIP_SIENNA_CICHLID:
+ dmub_asic = DMUB_ASIC_DCN30;
+ fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
+ break;
+#endif
default:
/* ASIC doesn't support DMUB. */
@@ -1228,10 +1241,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
region_params.vbios_size = adev->bios_size;
- region_params.fw_bss_data =
+ region_params.fw_bss_data = region_params.bss_data_size ?
adev->dm.dmub_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
- le32_to_cpu(hdr->inst_const_bytes);
+ le32_to_cpu(hdr->inst_const_bytes) : NULL;
region_params.fw_inst_const =
adev->dm.dmub_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
@@ -1305,15 +1318,11 @@ static int dm_sw_fini(void *handle)
adev->dm.dmub_srv = NULL;
}
- if (adev->dm.dmub_fw) {
- release_firmware(adev->dm.dmub_fw);
- adev->dm.dmub_fw = NULL;
- }
+ release_firmware(adev->dm.dmub_fw);
+ adev->dm.dmub_fw = NULL;
- if(adev->dm.fw_dmcu) {
- release_firmware(adev->dm.fw_dmcu);
- adev->dm.fw_dmcu = NULL;
- }
+ release_firmware(adev->dm.fw_dmcu);
+ adev->dm.fw_dmcu = NULL;
return 0;
}
@@ -1573,7 +1582,7 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
}
-enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
+static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
struct dc_state *context = NULL;
enum dc_status res = DC_ERROR_UNEXPECTED;
@@ -2693,7 +2702,7 @@ static int dm_atomic_get_state(struct drm_atomic_state *state,
return 0;
}
-struct dm_atomic_state *
+static struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
@@ -2711,7 +2720,7 @@ dm_atomic_get_new_state(struct drm_atomic_state *state)
return NULL;
}
-struct dm_atomic_state *
+static struct dm_atomic_state *
dm_atomic_get_old_state(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
@@ -3205,6 +3214,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_RENOIR:
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case CHIP_SIENNA_CICHLID:
+#endif
if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
@@ -3359,6 +3371,9 @@ static int dm_early_init(void *handle)
#endif
case CHIP_NAVI10:
case CHIP_NAVI12:
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case CHIP_SIENNA_CICHLID:
+#endif
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
@@ -3679,6 +3694,9 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
adev->asic_type == CHIP_NAVI10 ||
adev->asic_type == CHIP_NAVI14 ||
adev->asic_type == CHIP_NAVI12 ||
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ adev->asic_type == CHIP_SIENNA_CICHLID ||
+#endif
adev->asic_type == CHIP_RENOIR ||
adev->asic_type == CHIP_RAVEN) {
/* Fill GFX9 params */
@@ -3698,6 +3716,11 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
tiling_info->gfx9.shaderEnable = 1;
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ if (adev->asic_type == CHIP_SIENNA_CICHLID)
+ tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
+
+#endif
ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
plane_size, tiling_info,
tiling_flags, dcc, address,
@@ -4546,10 +4569,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
stream->use_vsc_sdp_for_colorimetry =
aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
} else {
- if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
- stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
+ if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
stream->use_vsc_sdp_for_colorimetry = true;
- }
}
mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
}
@@ -5024,7 +5045,8 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
struct drm_connector *connector = &aconnector->base;
struct amdgpu_device *adev = connector->dev->dev_private;
struct dc_stream_state *stream;
- int requested_bpc = connector->state ? connector->state->max_requested_bpc : 8;
+ const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
+ int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
enum dc_status dc_result = DC_OK;
do {
@@ -5039,11 +5061,12 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
dc_result = dc_validate_stream(adev->dm.dc, stream);
if (dc_result != DC_OK) {
- DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
+ DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
drm_mode->hdisplay,
drm_mode->vdisplay,
drm_mode->clock,
- dc_result);
+ dc_result,
+ dc_status_to_str(dc_result));
dc_stream_release(stream);
stream = NULL;
@@ -5424,7 +5447,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
mst_mgr,
mst_port,
dm_new_connector_state->pbn,
- 0);
+ dm_mst_get_pbn_divider(aconnector->dc_link));
if (dm_new_connector_state->vcpi_slots < 0) {
DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
return dm_new_connector_state->vcpi_slots;
@@ -5536,7 +5559,7 @@ dm_drm_plane_duplicate_state(struct drm_plane *plane)
return &dm_plane_state->base;
}
-void dm_drm_plane_destroy_state(struct drm_plane *plane,
+static void dm_drm_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
@@ -5665,6 +5688,17 @@ static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
amdgpu_bo_unref(&rbo);
}
+static int dm_plane_helper_check_state(struct drm_plane_state *state,
+ struct drm_crtc_state *new_crtc_state)
+{
+ int max_downscale = 0;
+ int max_upscale = INT_MAX;
+
+ /* TODO: These should be checked against DC plane caps */
+ return drm_atomic_helper_check_plane_state(
+ state, new_crtc_state, max_downscale, max_upscale, true, true);
+}
+
static int dm_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
@@ -5672,6 +5706,7 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
struct dc *dc = adev->dm.dc;
struct dm_plane_state *dm_plane_state;
struct dc_scaling_info scaling_info;
+ struct drm_crtc_state *new_crtc_state;
int ret;
dm_plane_state = to_dm_plane_state(state);
@@ -5679,6 +5714,15 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
if (!dm_plane_state->dc_state)
return 0;
+ new_crtc_state =
+ drm_atomic_get_new_crtc_state(state->state, state->crtc);
+ if (!new_crtc_state)
+ return -EINVAL;
+
+ ret = dm_plane_helper_check_state(state, new_crtc_state);
+ if (ret)
+ return ret;
+
ret = fill_dc_scaling_info(state, &scaling_info);
if (ret)
return ret;
@@ -8195,6 +8239,10 @@ static int dm_update_plane_state(struct dc *dc,
if (!needs_reset)
return 0;
+ ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
+ if (ret)
+ return ret;
+
WARN_ON(dm_new_plane_state->dc_state);
dc_new_plane_state = dc_create_plane_state(dc);
@@ -8477,7 +8525,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_plane_state *old_plane_state, *new_plane_state;
enum surface_update_type update_type = UPDATE_TYPE_FAST;
enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
-
+ enum dc_status status;
int ret, i;
/*
@@ -8689,8 +8737,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
ret = drm_dp_mst_atomic_check(state);
if (ret)
goto fail;
-
- if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
+ status = dc_validate_global_state(dc, dm_state->context, false);
+ if (status != DC_OK) {
+ DC_LOG_WARNING("DC global validation failure: %s (%d)",
+ dc_status_to_str(status), status);
ret = -EINVAL;
goto fail;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index d61186ff411d..86c132ddc452 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -139,10 +139,12 @@ struct amdgpu_dm_backlight_caps {
* @backlight_link: Link on which to control backlight
* @backlight_caps: Capabilities of the backlight device
* @freesync_module: Module handling freesync calculations
+ * @hdcp_workqueue: AMDGPU content protection queue
* @fw_dmcu: Reference to DMCU firmware
* @dmcu_fw_version: Version of the DMCU firmware
* @soc_bounding_box: SOC bounding box values provided by gpu_info FW
* @cached_state: Caches device atomic state for suspend/resume
+ * @cached_dc_state: Cached state of content streams
* @compressor: Frame buffer compression buffer. See &struct dm_comressor_info
*/
struct amdgpu_display_manager {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index 4dfb6b55bb2e..b321ff654df4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -195,10 +195,13 @@ static int __set_legacy_tf(struct dc_transfer_func *func,
bool has_rom)
{
struct dc_gamma *gamma = NULL;
+ struct calculate_buffer cal_buffer = {0};
bool res;
ASSERT(lut && lut_size == MAX_COLOR_LEGACY_LUT_ENTRIES);
+ cal_buffer.buffer_index = -1;
+
gamma = dc_create_gamma();
if (!gamma)
return -ENOMEM;
@@ -208,7 +211,7 @@ static int __set_legacy_tf(struct dc_transfer_func *func,
__drm_lut_to_dc_gamma(lut, gamma, true);
res = mod_color_calculate_regamma_params(func, gamma, true, has_rom,
- NULL);
+ NULL, &cal_buffer);
dc_gamma_release(&gamma);
@@ -221,10 +224,13 @@ static int __set_output_tf(struct dc_transfer_func *func,
bool has_rom)
{
struct dc_gamma *gamma = NULL;
+ struct calculate_buffer cal_buffer = {0};
bool res;
ASSERT(lut && lut_size == MAX_COLOR_LUT_ENTRIES);
+ cal_buffer.buffer_index = -1;
+
gamma = dc_create_gamma();
if (!gamma)
return -ENOMEM;
@@ -248,7 +254,7 @@ static int __set_output_tf(struct dc_transfer_func *func,
*/
gamma->type = GAMMA_CS_TFM_1D;
res = mod_color_calculate_regamma_params(func, gamma, false,
- has_rom, NULL);
+ has_rom, NULL, &cal_buffer);
}
dc_gamma_release(&gamma);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 076af267b488..db4fab10a0c4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -859,8 +859,8 @@ static int hdcp_sink_capability_show(struct seq_file *m, void *data)
seq_printf(m, "%s:%d HDCP version: ", connector->name, connector->base.id);
- hdcp_cap = dc_link_is_hdcp14(aconnector->dc_link);
- hdcp2_cap = dc_link_is_hdcp22(aconnector->dc_link);
+ hdcp_cap = dc_link_is_hdcp14(aconnector->dc_link, aconnector->dc_sink->sink_signal);
+ hdcp2_cap = dc_link_is_hdcp22(aconnector->dc_link, aconnector->dc_sink->sink_signal);
if (hdcp_cap)
@@ -1058,7 +1058,6 @@ static const struct {
{"link_settings", &dp_link_settings_debugfs_fops},
{"phy_settings", &dp_phy_settings_debugfs_fop},
{"test_pattern", &dp_phy_test_pattern_fops},
- {"output_bpc", &output_bpc_fops},
{"vrr_range", &vrr_range_fops},
#ifdef CONFIG_DRM_AMD_DC_HDCP
{"hdcp_sink_capability", &hdcp_sink_capability_fops},
@@ -1142,6 +1141,9 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
&force_yuv420_output_fops);
+ debugfs_create_file("output_bpc", 0644, dir, connector,
+ &output_bpc_fops);
+
connector->debugfs_dpcd_address = 0;
connector->debugfs_dpcd_size = 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index dcf84a61de37..694c5bc93665 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -390,6 +390,42 @@ void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
kfree(hdcp_work);
}
+
+static bool enable_assr(void *handle, struct dc_link *link)
+{
+
+ struct hdcp_workqueue *hdcp_work = handle;
+ struct mod_hdcp hdcp = hdcp_work->hdcp;
+ struct psp_context *psp = hdcp.config.psp.handle;
+ struct ta_dtm_shared_memory *dtm_cmd;
+ bool res = true;
+
+ if (!psp->dtm_context.dtm_initialized) {
+ DRM_INFO("Failed to enable ASSR, DTM TA is not initialized.");
+ return false;
+ }
+
+ dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+
+ mutex_lock(&psp->dtm_context.mutex);
+ memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
+
+ dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE;
+ dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index = link->link_enc_hw_inst;
+ dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
+
+ psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
+ DRM_INFO("Failed to enable ASSR");
+ res = false;
+ }
+
+ mutex_unlock(&psp->dtm_context.mutex);
+
+ return res;
+}
+
static void update_config(void *handle, struct cp_psp_stream_config *config)
{
struct hdcp_workqueue *hdcp_work = handle;
@@ -510,8 +546,10 @@ static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin
srm = psp_get_srm(work->hdcp.config.psp.handle, &srm_version, &srm_size);
- if (!srm)
- return -EINVAL;
+ if (!srm) {
+ ret = -EINVAL;
+ goto ret;
+ }
if (pos >= srm_size)
ret = 0;
@@ -599,6 +637,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
}
cp_psp->funcs.update_stream_config = update_config;
+ cp_psp->funcs.enable_assr = enable_assr;
cp_psp->handle = hdcp_work;
/* File created at /sys/class/drm/card0/device/hdcp_srm*/
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index b086d5c906e0..d839eb14e3f0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -627,3 +627,23 @@ void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
{
/* TODO: something */
}
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+
+void *dm_helpers_allocate_gpu_mem(
+ struct dc_context *ctx,
+ enum dc_gpu_mem_alloc_type type,
+ size_t size,
+ long long *addr)
+{
+ // TODO
+ return NULL;
+}
+
+void dm_helpers_free_gpu_mem(
+ struct dc_context *ctx,
+ enum dc_gpu_mem_alloc_type type,
+ void *pvMem)
+{
+ // TODO
+}
+#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index ae0a7ef1d595..cf15248739f7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -522,6 +522,8 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
int link_timeslots_used;
int fair_pbn_alloc;
+ pbn_per_timeslot = dm_mst_get_pbn_divider(dc_link);
+
for (i = 0; i < count; i++) {
if (vars[i].dsc_enabled) {
initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn;
@@ -533,9 +535,6 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
}
}
- pbn_per_timeslot = dc_link_bandwidth_kbps(dc_link,
- dc_link_get_link_cap(dc_link)) / (8 * 1000 * 54);
-
while (remaining_to_increase) {
next_index = -1;
min_initial_slack = -1;
@@ -564,7 +563,7 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn,
- dm_mst_get_pbn_divider(dc_link)) < 0)
+ pbn_per_timeslot) < 0)
return;
if (!drm_dp_mst_atomic_check(state)) {
vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
@@ -574,7 +573,7 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn,
- dm_mst_get_pbn_divider(dc_link)) < 0)
+ pbn_per_timeslot) < 0)
return;
}
} else {
@@ -583,7 +582,7 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn,
- dm_mst_get_pbn_divider(dc_link)) < 0)
+ pbn_per_timeslot) < 0)
return;
if (!drm_dp_mst_atomic_check(state)) {
vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
@@ -593,7 +592,7 @@ static void increase_dsc_bpp(struct drm_atomic_state *state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn,
- dm_mst_get_pbn_divider(dc_link)) < 0)
+ pbn_per_timeslot) < 0)
return;
}
}
@@ -647,7 +646,7 @@ static void try_disable_dsc(struct drm_atomic_state *state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn,
- 0) < 0)
+ dm_mst_get_pbn_divider(dc_link)) < 0)
return;
if (!drm_dp_mst_atomic_check(state)) {
@@ -718,7 +717,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
params[i].port->mgr,
params[i].port,
vars[i].pbn,
- 0) < 0)
+ dm_mst_get_pbn_divider(dc_link)) < 0)
return false;
}
if (!drm_dp_mst_atomic_check(state)) {
@@ -746,7 +745,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
params[i].port->mgr,
params[i].port,
vars[i].pbn,
- 0) < 0)
+ dm_mst_get_pbn_divider(dc_link)) < 0)
return false;
}
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index a2e1a73f66b8..c5f2216e59c4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -106,7 +106,7 @@ bool dm_pp_apply_display_requirements(
adev->powerplay.pp_funcs->display_configuration_change(
adev->powerplay.pp_handle,
&adev->pm.pm_display_cfg);
- else
+ else if (adev->smu.ppt_funcs)
smu_display_configuration_change(smu,
&adev->pm.pm_display_cfg);
@@ -530,6 +530,8 @@ bool dm_pp_get_static_clocks(
&pp_clk_info);
else if (adev->smu.ppt_funcs)
ret = smu_get_current_clocks(&adev->smu, &pp_clk_info);
+ else
+ return false;
if (ret)
return false;
@@ -590,7 +592,7 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
&wm_with_clock_ranges);
- else
+ else if (adev->smu.ppt_funcs)
smu_set_watermarks_for_clock_ranges(&adev->smu,
&wm_with_clock_ranges);
}
@@ -660,7 +662,7 @@ void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz)
pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz);
}
-enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
+static enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
struct pp_smu_wm_range_sets *ranges)
{
const struct dc_context *ctx = pp->dm;
@@ -728,7 +730,7 @@ enum pp_smu_status pp_nv_set_pme_wa_enable(struct pp_smu *pp)
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
+static enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
@@ -744,7 +746,8 @@ enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
+static enum pp_smu_status
+pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
@@ -760,7 +763,7 @@ enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
+static enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
struct pp_smu *pp, int mhz)
{
const struct dc_context *ctx = pp->dm;
@@ -783,7 +786,8 @@ enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
+static enum pp_smu_status
+pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
@@ -805,7 +809,7 @@ enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status pp_nv_set_pstate_handshake_support(
+static enum pp_smu_status pp_nv_set_pstate_handshake_support(
struct pp_smu *pp, BOOLEAN pstate_handshake_supported)
{
const struct dc_context *ctx = pp->dm;
@@ -818,7 +822,7 @@ enum pp_smu_status pp_nv_set_pstate_handshake_support(
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
+static enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
enum pp_smu_nv_clock_id clock_id, int mhz)
{
const struct dc_context *ctx = pp->dm;
@@ -853,7 +857,7 @@ enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
+static enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
struct pp_smu *pp, struct pp_smu_nv_clock_table *max_clocks)
{
const struct dc_context *ctx = pp->dm;
@@ -872,7 +876,7 @@ enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
return PP_SMU_RESULT_FAIL;
}
-enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
+static enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
unsigned int *clock_values_in_khz, unsigned int *num_states)
{
const struct dc_context *ctx = pp->dm;
@@ -892,7 +896,7 @@ enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
return PP_SMU_RESULT_FAIL;
}
-enum pp_smu_status pp_rn_get_dpm_clock_table(
+static enum pp_smu_status pp_rn_get_dpm_clock_table(
struct pp_smu *pp, struct dpm_clocks *clock_table)
{
const struct dc_context *ctx = pp->dm;
@@ -911,7 +915,7 @@ enum pp_smu_status pp_rn_get_dpm_clock_table(
return PP_SMU_RESULT_FAIL;
}
-enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
+static enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
struct pp_smu_wm_range_sets *ranges)
{
const struct dc_context *ctx = pp->dm;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 022da5d45d4d..51f57420fadd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -47,29 +47,4 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line)
{
}
-bool dm_write_persistent_data(struct dc_context *ctx,
- const struct dc_sink *sink,
- const char *module_name,
- const char *key_name,
- void *params,
- unsigned int size,
- struct persistent_data_flag *flag)
-{
- /*TODO implement*/
- return false;
-}
-
-bool dm_read_persistent_data(struct dc_context *ctx,
- const struct dc_sink *sink,
- const char *module_name,
- const char *key_name,
- void *params,
- unsigned int size,
- struct persistent_data_flag *flag)
-{
- /*TODO implement*/
- return false;
-}
-
/**** power component interfaces ****/
-
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 6e3dddc73246..e0f4f1be1618 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -32,6 +32,10 @@ DC_LIBS += dcn10 dml
DC_LIBS += dcn21
endif
+ifdef CONFIG_DRM_AMD_DC_DCN3_0
+DC_LIBS += dcn30
+endif
+
DC_LIBS += dce120
DC_LIBS += dce112
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 37fa7b48250e..b8684131151d 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1154,6 +1154,9 @@ static enum bp_result bios_parser_get_firmware_info(
result = get_firmware_info_v3_2(bp, info);
break;
case 3:
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ case 4:
+#endif
result = get_firmware_info_v3_2(bp, info);
break;
default:
@@ -1375,6 +1378,63 @@ static struct atom_encoder_caps_record *get_encoder_cap_record(
return NULL;
}
+static enum bp_result get_vram_info_v23(
+ struct bios_parser *bp,
+ struct dc_vram_info *info)
+{
+ struct atom_vram_info_header_v2_3 *info_v23;
+ enum bp_result result = BP_RESULT_OK;
+
+ info_v23 = GET_IMAGE(struct atom_vram_info_header_v2_3,
+ DATA_TABLES(vram_info));
+
+ if (info_v23 == NULL)
+ return BP_RESULT_BADBIOSTABLE;
+
+ info->num_chans = info_v23->vram_module[0].channel_num;
+ info->dram_channel_width_bytes = (1 << info_v23->vram_module[0].channel_width) / 8;
+
+ return result;
+}
+
+static enum bp_result get_vram_info_v24(
+ struct bios_parser *bp,
+ struct dc_vram_info *info)
+{
+ struct atom_vram_info_header_v2_4 *info_v24;
+ enum bp_result result = BP_RESULT_OK;
+
+ info_v24 = GET_IMAGE(struct atom_vram_info_header_v2_4,
+ DATA_TABLES(vram_info));
+
+ if (info_v24 == NULL)
+ return BP_RESULT_BADBIOSTABLE;
+
+ info->num_chans = info_v24->vram_module[0].channel_num;
+ info->dram_channel_width_bytes = (1 << info_v24->vram_module[0].channel_width) / 8;
+
+ return result;
+}
+
+static enum bp_result get_vram_info_v25(
+ struct bios_parser *bp,
+ struct dc_vram_info *info)
+{
+ struct atom_vram_info_header_v2_5 *info_v25;
+ enum bp_result result = BP_RESULT_OK;
+
+ info_v25 = GET_IMAGE(struct atom_vram_info_header_v2_5,
+ DATA_TABLES(vram_info));
+
+ if (info_v25 == NULL)
+ return BP_RESULT_BADBIOSTABLE;
+
+ info->num_chans = info_v25->vram_module[0].channel_num;
+ info->dram_channel_width_bytes = (1 << info_v25->vram_module[0].channel_width) / 8;
+
+ return result;
+}
+
/*
* get_integrated_info_v11
*
@@ -1666,6 +1726,46 @@ static enum bp_result construct_integrated_info(
return result;
}
+static enum bp_result bios_parser_get_vram_info(
+ struct dc_bios *dcb,
+ struct dc_vram_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ enum bp_result result = BP_RESULT_BADBIOSTABLE;
+ struct atom_common_table_header *header;
+ struct atom_data_revision revision;
+
+ if (info && DATA_TABLES(vram_info)) {
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(vram_info));
+
+ get_atom_data_table_revision(header, &revision);
+
+ switch (revision.major) {
+ case 2:
+ switch (revision.minor) {
+ case 3:
+ result = get_vram_info_v23(bp, info);
+ break;
+ case 4:
+ result = get_vram_info_v24(bp, info);
+ break;
+ case 5:
+ result = get_vram_info_v25(bp, info);
+ break;
+ default:
+ break;
+ }
+ break;
+
+ default:
+ return result;
+ }
+
+ }
+ return result;
+}
+
static struct integrated_info *bios_parser_create_integrated_info(
struct dc_bios *dcb)
{
@@ -1877,6 +1977,108 @@ static enum bp_result bios_get_board_layout_info(
return BP_RESULT_OK;
}
+
+static uint16_t bios_parser_pack_data_tables(
+ struct dc_bios *dcb,
+ void *dst)
+{
+#ifdef PACK_BIOS_DATA
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct atom_rom_header_v2_2 *rom_header = NULL;
+ struct atom_rom_header_v2_2 *packed_rom_header = NULL;
+ struct atom_common_table_header *data_tbl_header = NULL;
+ struct atom_master_list_of_data_tables_v2_1 *data_tbl_list = NULL;
+ struct atom_master_data_table_v2_1 *packed_master_data_tbl = NULL;
+ struct atom_data_revision tbl_rev = {0};
+ uint16_t *rom_header_offset = NULL;
+ const uint8_t *bios = bp->base.bios;
+ uint8_t *bios_dst = (uint8_t *)dst;
+ uint16_t packed_rom_header_offset;
+ uint16_t packed_masterdatatable_offset;
+ uint16_t packed_data_tbl_offset;
+ uint16_t data_tbl_offset;
+ unsigned int i;
+
+ rom_header_offset =
+ GET_IMAGE(uint16_t, OFFSET_TO_ATOM_ROM_HEADER_POINTER);
+
+ if (!rom_header_offset)
+ return 0;
+
+ rom_header = GET_IMAGE(struct atom_rom_header_v2_2, *rom_header_offset);
+
+ if (!rom_header)
+ return 0;
+
+ get_atom_data_table_revision(&rom_header->table_header, &tbl_rev);
+ if (!(tbl_rev.major >= 2 && tbl_rev.minor >= 2))
+ return 0;
+
+ get_atom_data_table_revision(&bp->master_data_tbl->table_header, &tbl_rev);
+ if (!(tbl_rev.major >= 2 && tbl_rev.minor >= 1))
+ return 0;
+
+ packed_rom_header_offset =
+ OFFSET_TO_ATOM_ROM_HEADER_POINTER + sizeof(*rom_header_offset);
+
+ packed_masterdatatable_offset =
+ packed_rom_header_offset + rom_header->table_header.structuresize;
+
+ packed_data_tbl_offset =
+ packed_masterdatatable_offset +
+ bp->master_data_tbl->table_header.structuresize;
+
+ packed_rom_header =
+ (struct atom_rom_header_v2_2 *)(bios_dst + packed_rom_header_offset);
+
+ packed_master_data_tbl =
+ (struct atom_master_data_table_v2_1 *)(bios_dst +
+ packed_masterdatatable_offset);
+
+ memcpy(bios_dst, bios, OFFSET_TO_ATOM_ROM_HEADER_POINTER);
+
+ *((uint16_t *)(bios_dst + OFFSET_TO_ATOM_ROM_HEADER_POINTER)) =
+ packed_rom_header_offset;
+
+ memcpy(bios_dst + packed_rom_header_offset, rom_header,
+ rom_header->table_header.structuresize);
+
+ packed_rom_header->masterdatatable_offset = packed_masterdatatable_offset;
+
+ memcpy(&packed_master_data_tbl->table_header,
+ &bp->master_data_tbl->table_header,
+ sizeof(bp->master_data_tbl->table_header));
+
+ data_tbl_list = &bp->master_data_tbl->listOfdatatables;
+
+ /* Each data table offset in data table list is 2 bytes,
+ * we can use that to iterate through listOfdatatables
+ * without knowing the name of each member.
+ */
+ for (i = 0; i < sizeof(*data_tbl_list)/sizeof(uint16_t); i++) {
+ data_tbl_offset = *((uint16_t *)data_tbl_list + i);
+
+ if (data_tbl_offset) {
+ data_tbl_header =
+ (struct atom_common_table_header *)(bios + data_tbl_offset);
+
+ memcpy(bios_dst + packed_data_tbl_offset, data_tbl_header,
+ data_tbl_header->structuresize);
+
+ *((uint16_t *)&packed_master_data_tbl->listOfdatatables + i) =
+ packed_data_tbl_offset;
+
+ packed_data_tbl_offset += data_tbl_header->structuresize;
+ } else {
+ *((uint16_t *)&packed_master_data_tbl->listOfdatatables + i) = 0;
+ }
+ }
+ return packed_data_tbl_offset;
+#endif
+ // TODO: There is data bytes alignment issue, disable it for now.
+ return 0;
+}
+
static const struct dc_vbios_funcs vbios_funcs = {
.get_connectors_number = bios_parser_get_connectors_number,
@@ -1925,6 +2127,7 @@ static const struct dc_vbios_funcs vbios_funcs = {
.bios_parser_destroy = firmware_parser_destroy,
.get_board_layout_info = bios_get_board_layout_info,
+ .pack_data_tables = bios_parser_pack_data_tables,
};
static bool bios_parser2_construct(
@@ -2006,6 +2209,7 @@ static bool bios_parser2_construct(
bp->base.integrated_info = bios_parser_create_integrated_info(&bp->base);
bp->base.fw_info_valid = bios_parser_get_firmware_info(&bp->base, &bp->base.fw_info) == BP_RESULT_OK;
+ bios_parser_get_vram_info(&bp->base, &bp->base.vram_info);
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 204d7942a6e5..21ff6b686f5f 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -65,6 +65,11 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case DCN_VERSION_3_0:
+ *h = dal_cmd_tbl_helper_dce112_get_table2();
+ return true;
+#endif
default:
/* Unsupported DCE */
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
index 755b6e33140a..bf0affef893f 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
@@ -388,3 +388,43 @@ const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table2(void)
{
return &command_table_helper_funcs;
}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+/* function table */
+static const struct command_table_helper command_table_helper_funcs_dcn2x = {
+ .controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom2,
+ .encoder_action_to_atom = encoder_action_to_atom,
+ .engine_bp_to_atom = engine_bp_to_atom,
+ .clock_source_id_to_atom = clock_source_id_to_atom,
+ .clock_source_id_to_atom_phy_clk_src_id =
+ clock_source_id_to_atom_phy_clk_src_id,
+ .signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
+ .hpd_sel_to_atom = hpd_sel_to_atom,
+ .dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
+ .phy_id_to_atom = phy_id_to_atom,
+ .disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
+ .clock_source_id_to_ref_clk_src = NULL,
+ .transmitter_bp_to_atom = NULL,
+ .encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom2,
+ .encoder_mode_bp_to_atom =
+ dal_cmd_table_helper_encoder_mode_bp_to_atom2,
+ .dc_clock_type_to_atom = dc_clock_type_to_atom,
+ .transmitter_color_depth_to_atom = transmitter_color_depth_to_atom,
+
+};
+
+/*
+ * dal_cmd_tbl_helper_dce110_get_table
+ *
+ * @brief
+ * Initialize command table helper functions
+ *
+ * @param
+ * const struct command_table_helper **h - [out] struct of functions
+ *
+ */
+const struct command_table_helper *dal_cmd_tbl_helper_dcn2_get_table2(void)
+{
+ return &command_table_helper_funcs_dcn2x;
+}
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h
index abf28a06f5bc..2d9e9f3c579d 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h
@@ -30,5 +30,8 @@ struct command_table_helper;
/* Initialize command table helper functions */
const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table2(void);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+const struct command_table_helper *dal_cmd_tbl_helper_dcn2_get_table2(void);
+#endif
#endif /* __DAL_COMMAND_TABLE_HELPER_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
index 1ef0074302c5..41284e263325 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
@@ -805,7 +805,7 @@ void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v)
if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) {
v->time_for_meta_pte_without_immediate_flip = dcn_bw_max3(
- v->meta_pte_bytes_frame[k] / v->prefetch_bandwidth[k],
+ v->meta_pte_bytes_frame[k] / v->prefetch_bw[k],
v->extra_latency,
v->htotal[k] / v->pixel_clock[k] / 4.0);
} else {
@@ -814,7 +814,7 @@ void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v)
if (v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes) {
v->time_for_meta_and_dpte_row_without_immediate_flip = dcn_bw_max3((
- v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bandwidth[k],
+ v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bw[k],
v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip,
v->extra_latency);
} else {
@@ -827,7 +827,7 @@ void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v)
v->lines_for_meta_and_dpte_row_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
v->maximum_vstartup = v->maximum_vstartup - 1;
- if (v->lines_for_meta_pte_without_immediate_flip[k] < 8.0 && v->lines_for_meta_and_dpte_row_without_immediate_flip[k] < 16.0)
+ if (v->lines_for_meta_pte_without_immediate_flip[k] < 32.0 && v->lines_for_meta_and_dpte_row_without_immediate_flip[k] < 16.0)
break;
} while(1);
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 1e5a92b192a1..51397b565ddf 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -302,10 +302,17 @@ static void pipe_ctx_to_e2e_pipe_params (
struct _vcs_dpi_display_pipe_params_st *input)
{
input->src.is_hsplit = false;
- if (pipe->top_pipe != NULL && pipe->top_pipe->plane_state == pipe->plane_state)
+
+ /* stereo can never be split */
+ if (pipe->plane_state->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE ||
+ pipe->plane_state->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM) {
+ /* reset the split group if it was already considered split. */
+ input->src.hsplit_grp = pipe->pipe_idx;
+ } else if (pipe->top_pipe != NULL && pipe->top_pipe->plane_state == pipe->plane_state) {
input->src.is_hsplit = true;
- else if (pipe->bottom_pipe != NULL && pipe->bottom_pipe->plane_state == pipe->plane_state)
+ } else if (pipe->bottom_pipe != NULL && pipe->bottom_pipe->plane_state == pipe->plane_state) {
input->src.is_hsplit = true;
+ }
if (pipe->plane_res.dpp->ctx->dc->debug.optimized_watermark) {
/*
@@ -374,6 +381,13 @@ static void pipe_ctx_to_e2e_pipe_params (
input->src.viewport_width_c = input->src.viewport_width;
input->src.viewport_height_c = input->src.viewport_height;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
+ input->src.source_format = dm_rgbe_alpha;
+ input->src.viewport_width_c = input->src.viewport_width;
+ input->src.viewport_height_c = input->src.viewport_height;
+ break;
+#endif
default:
input->src.source_format = dm_444_32;
input->src.viewport_width_c = input->src.viewport_width;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index c0f6a8c7de7d..6874276bb2a1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -97,3 +97,13 @@ AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DC
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
endif
+ifdef CONFIG_DRM_AMD_DC_DCN3_0
+###############################################################################
+# DCN30
+###############################################################################
+CLK_MGR_DCN30 = dcn30_clk_mgr.o dcn30_clk_mgr_smu_msg.o
+
+AMD_DAL_CLK_MGR_DCN30 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn30/,$(CLK_MGR_DCN30))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN30)
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index a5c2114e4292..f376058b5df6 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -38,6 +38,9 @@
#include "dcn10/rv2_clk_mgr.h"
#include "dcn20/dcn20_clk_mgr.h"
#include "dcn21/rn_clk_mgr.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#include "dcn30/dcn30_clk_mgr.h"
+#endif
int clk_mgr_helper_get_active_display_cnt(
@@ -169,6 +172,15 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
break;
case FAMILY_NV:
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ if (ASICREV_IS_SIENNA_CICHLID_P(asic_id.hw_internal_rev)) {
+ /* TODO: to add SIENNA_CICHLID clk_mgr support, once CLK IP header files are available,
+ * for now use DCN3AG clk mgr.
+ */
+ dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+ break;
+ }
+#endif
dcn20_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
break;
#endif /* Family RV and NV*/
@@ -184,6 +196,16 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+
+ switch (clk_mgr_base->ctx->asic_id.chip_family) {
+ case FAMILY_NV:
+ if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
+ dcn3_clk_mgr_destroy(clk_mgr);
+ break;
+ }
+ }
+#endif
kfree(clk_mgr);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 55d09adbf0d9..c63ec960e116 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -234,20 +234,26 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
dpp_clock_lowered = true;
clk_mgr->base.clks.dppclk_khz = new_clocks->dppclk_khz;
- if (pp_smu && pp_smu->set_voltage_by_freq)
- pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000);
-
update_dppclk = true;
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
- if (pp_smu && pp_smu->set_voltage_by_freq)
- pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000);
update_dispclk = true;
}
+ if (update_dppclk || update_dispclk) {
+ new_clocks->disp_dpp_voltage_level_khz = new_clocks->dppclk_khz;
+
+ if (update_dispclk)
+ new_clocks->disp_dpp_voltage_level_khz = new_clocks->dispclk_khz > new_clocks->dppclk_khz ? new_clocks->dispclk_khz : new_clocks->dppclk_khz;
+
+ clk_mgr_base->clks.disp_dpp_voltage_level_khz = new_clocks->disp_dpp_voltage_level_khz;
+ if (pp_smu && pp_smu->set_voltage_by_freq)
+ pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.disp_dpp_voltage_level_khz / 1000);
+ }
+
if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
if (dpp_clock_lowered) {
// if clock is being lowered, increase DTO before lowering refclk
@@ -403,6 +409,8 @@ static bool dcn2_are_clock_states_equal(struct dc_clocks *a,
return false;
else if (a->dppclk_khz != b->dppclk_khz)
return false;
+ else if (a->disp_dpp_voltage_level_khz != b->disp_dpp_voltage_level_khz)
+ return false;
else if (a->dcfclk_khz != b->dcfclk_khz)
return false;
else if (a->socclk_khz != b->socclk_khz)
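The dcn20 change above replaces the two per-clock voltage requests with a single one driven by whichever of DISPCLK/DPPCLK is higher. A minimal standalone C sketch of that selection logic, with a hypothetical set_voltage_by_freq() stub standing in for the pp_smu callback:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for pp_smu->set_voltage_by_freq(); takes MHz. */
static void set_voltage_by_freq(unsigned int freq_mhz)
{
	printf("request voltage for %u MHz\n", freq_mhz);
}

/* Pick the voltage-level frequency as max(dispclk, dppclk), in kHz. */
static unsigned int pick_disp_dpp_voltage_level_khz(unsigned int dispclk_khz,
						    unsigned int dppclk_khz,
						    bool update_dispclk)
{
	unsigned int level_khz = dppclk_khz;

	if (update_dispclk)
		level_khz = dispclk_khz > dppclk_khz ? dispclk_khz : dppclk_khz;

	return level_khz;
}

int main(void)
{
	unsigned int level = pick_disp_dpp_voltage_level_khz(600000, 400000, true);

	set_voltage_by_freq(level / 1000);	/* one request instead of two */
	return 0;
}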
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 24c5765890fa..39788a7bd003 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -153,8 +153,9 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
}
// workaround: limit dppclk to 100 MHz to avoid underflow when a lower-clocked eDP panel switches to an additional 4K monitor.
+ // Do not adjust dppclk if dppclk is 0, to avoid unexpected results
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
- if (new_clocks->dppclk_khz < 100000)
+ if (new_clocks->dppclk_khz < 100000 && new_clocks->dppclk_khz > 0)
new_clocks->dppclk_khz = 100000;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h
new file mode 100644
index 000000000000..5ed03287aaaf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+// TEMPORARY until this exists in the proper location
+#ifndef DALSMC_H
+#define DALSMC_H
+
+#define DALSMC_VERSION 0x1
+
+// SMU Response Codes:
+#define DALSMC_Result_OK 0x1
+#define DALSMC_Result_Failed 0xFF
+#define DALSMC_Result_UnknownCmd 0xFE
+#define DALSMC_Result_CmdRejectedPrereq 0xFD
+#define DALSMC_Result_CmdRejectedBusy 0xFC
+
+
+
+// Message Definitions:
+#define DALSMC_MSG_TestMessage 0x1
+#define DALSMC_MSG_GetSmuVersion 0x2
+#define DALSMC_MSG_GetDriverIfVersion 0x3
+#define DALSMC_MSG_GetMsgHeaderVersion 0x4
+#define DALSMC_MSG_SetDalDramAddrHigh 0x5
+#define DALSMC_MSG_SetDalDramAddrLow 0x6
+#define DALSMC_MSG_TransferTableSmu2Dram 0x7
+#define DALSMC_MSG_TransferTableDram2Smu 0x8
+#define DALSMC_MSG_SetHardMinByFreq 0x9
+#define DALSMC_MSG_SetHardMaxByFreq 0xA
+#define DALSMC_MSG_GetDpmFreqByIndex 0xB
+#define DALSMC_MSG_GetDcModeMaxDpmFreq 0xC
+#define DALSMC_MSG_SetMinDeepSleepDcefclk 0xD
+#define DALSMC_MSG_NumOfDisplays 0xE
+#define DALSMC_MSG_SetExternalClientDfCstateAllow 0x10
+#define DALSMC_MSG_BacoAudioD3PME 0x11
+#define DALSMC_Message_Count 0x12
+
+#endif
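For reference, a small standalone sketch of decoding the response codes defined above for logging; the code values are copied from the header, while the helper itself is hypothetical:

#include <stdio.h>
#include <stdint.h>

#define DALSMC_Result_OK                0x1
#define DALSMC_Result_Failed            0xFF
#define DALSMC_Result_UnknownCmd        0xFE
#define DALSMC_Result_CmdRejectedPrereq 0xFD
#define DALSMC_Result_CmdRejectedBusy   0xFC

static const char *dalsmc_result_to_str(uint32_t result)
{
	switch (result) {
	case DALSMC_Result_OK:                return "OK";
	case DALSMC_Result_Failed:            return "failed";
	case DALSMC_Result_UnknownCmd:        return "unknown command";
	case DALSMC_Result_CmdRejectedPrereq: return "rejected: prerequisite not met";
	case DALSMC_Result_CmdRejectedBusy:   return "rejected: SMU busy";
	default:                              return "unexpected result code";
	}
}

int main(void)
{
	printf("0xFC -> %s\n", dalsmc_result_to_str(0xFC));
	return 0;
}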
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
new file mode 100644
index 000000000000..b27cb52903f5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -0,0 +1,543 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dccg.h"
+#include "clk_mgr_internal.h"
+
+#include "dcn30_clk_mgr_smu_msg.h"
+#include "dcn20/dcn20_clk_mgr.h"
+#include "dce100/dce_clk_mgr.h"
+#include "reg_helper.h"
+#include "core_types.h"
+#include "dm_helpers.h"
+
+#include "atomfirmware.h"
+
+
+#include "sienna_cichlid_ip_offset.h"
+#include "dcn/dcn_3_0_0_offset.h"
+#include "dcn/dcn_3_0_0_sh_mask.h"
+
+#include "nbio/nbio_7_4_offset.h"
+
+#include "dcn/dpcs_3_0_0_offset.h"
+#include "dcn/dpcs_3_0_0_sh_mask.h"
+
+#include "mmhub/mmhub_2_0_0_offset.h"
+#include "mmhub/mmhub_2_0_0_sh_mask.h"
+/* we don't have a clk folder yet */
+#include "dcn30/dcn30_clk_mgr.h"
+
+#undef FN
+#define FN(reg_name, field_name) \
+ clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
+
+#define REG(reg) \
+ (clk_mgr->regs->reg)
+
+#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
+
+#define BASE(seg) BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+#undef CLK_SRI
+#define CLK_SRI(reg_name, block, inst)\
+ .reg_name = mm ## block ## _ ## reg_name
+
+static const struct clk_mgr_registers clk_mgr_regs = {
+ CLK_REG_LIST_DCN3()
+};
+
+static const struct clk_mgr_shift clk_mgr_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCN20_BASE(__SHIFT)
+};
+
+static const struct clk_mgr_mask clk_mgr_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCN20_BASE(_MASK)
+};
+
+
+/* Query SMU for all clock states for a particular clock */
+static void dcn3_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, unsigned int *entry_0, unsigned int *num_levels)
+{
+ unsigned int i;
+ char *entry_i = (char *)entry_0;
+ uint32_t ret = dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, 0xFF);
+
+ if (ret & (1 << 31))
+ /* fine-grained, only min and max */
+ *num_levels = 2;
+ else
+ /* discrete, a number of fixed states */
+ /* will set num_levels to 0 on failure */
+ *num_levels = ret & 0xFF;
+
+ /* if the initial message failed, num_levels will be 0 */
+ for (i = 0; i < *num_levels; i++) {
+ *((unsigned int *)entry_i) = (dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, i) & 0xFFFF);
+ entry_i += sizeof(clk_mgr->base.bw_params->clk_table.entries[0]);
+ }
+}
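A standalone sketch of the same response parsing against a mocked query helper; the mocked values are invented, and the bit layout (bit 31 = fine-grained, bits 7:0 = level count, frequency in the low 16 bits) follows the GetDpmFreqByIndex comment later in dcn30_clk_mgr_smu_msg.c:

#include <stdint.h>
#include <stdio.h>

/* Mocked SMU query: dpm_level 0xFF returns feature bits + level count,
 * any other level returns the frequency in MHz in the low 16 bits. */
static uint32_t mock_get_dpm_freq_by_index(uint8_t dpm_level)
{
	static const uint16_t levels_mhz[] = { 400, 600, 800, 1000 };

	if (dpm_level == 0xFF)
		return 4;			/* discrete DPM, 4 levels */
	return levels_mhz[dpm_level % 4];
}

int main(void)
{
	uint32_t ret = mock_get_dpm_freq_by_index(0xFF);
	unsigned int num_levels, i;

	if (ret & (1u << 31))
		num_levels = 2;			/* fine-grained: min and max only */
	else
		num_levels = ret & 0xFF;	/* discrete: bits 7:0 hold the count */

	for (i = 0; i < num_levels; i++)
		printf("level %u: %u MHz\n", i,
		       mock_get_dpm_freq_by_index(i) & 0xFFFF);
	return 0;
}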
+
+static void dcn3_build_wm_range_table(struct clk_mgr_internal *clk_mgr)
+{
+ /* defaults */
+ double pstate_latency_us = clk_mgr->base.ctx->dc->dml.soc.dram_clock_change_latency_us;
+ double sr_exit_time_us = clk_mgr->base.ctx->dc->dml.soc.sr_exit_time_us;
+ double sr_enter_plus_exit_time_us = clk_mgr->base.ctx->dc->dml.soc.sr_enter_plus_exit_time_us;
+ uint16_t min_uclk_mhz = clk_mgr->base.bw_params->clk_table.entries[0].memclk_mhz;
+
+ /* Set A - Normal - default values*/
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].valid = true;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us = pstate_latency_us;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us = sr_exit_time_us;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = 0;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;
+
+ /* Set B - Performance - higher minimum clocks */
+// clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].valid = true;
+// clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us = pstate_latency_us;
+// clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us = sr_exit_time_us;
+// clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
+// clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
+// clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = TUNED VALUE;
+// clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_dcfclk = 0xFFFF;
+// clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_uclk = TUNED VALUE;
+// clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_uclk = 0xFFFF;
+
+ /* Set C - Dummy P-State - P-State latency set to "dummy p-state" value */
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].valid = true;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = clk_mgr->base.ctx->dc->dml.soc.dummy_pstate_latency_us;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_dcfclk = 0;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_dcfclk = 0xFFFF;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz;
+ clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF;
+
+}
+
+void dcn3_init_clocks(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ unsigned int num_levels;
+
+ memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks));
+ clk_mgr_base->clks.p_state_change_support = true;
+ clk_mgr_base->clks.prev_p_state_change_support = true;
+ clk_mgr->smu_present = false;
+
+ if (!clk_mgr_base->bw_params)
+ return;
+
+ if (!clk_mgr_base->force_smu_not_present && dcn30_smu_get_smu_version(clk_mgr, &clk_mgr->smu_ver))
+ clk_mgr->smu_present = true;
+
+ if (!clk_mgr->smu_present)
+ return;
+
+ // do we fail if these fail? if so, how? do we not care to check?
+ dcn30_smu_check_driver_if_version(clk_mgr);
+ dcn30_smu_check_msg_header_version(clk_mgr);
+
+ /* DCFCLK */
+ dcn3_init_single_clock(clk_mgr, PPCLK_DCEFCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
+ &num_levels);
+
+ /* DTBCLK */
+ dcn3_init_single_clock(clk_mgr, PPCLK_DTBCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
+ &num_levels);
+
+ // DPREFCLK ???
+
+ /* DISPCLK */
+ dcn3_init_single_clock(clk_mgr, PPCLK_DISPCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
+ &num_levels);
+
+ /* DPPCLK */
+ dcn3_init_single_clock(clk_mgr, PPCLK_PIXCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].dppclk_mhz,
+ &num_levels);
+
+ /* PHYCLK */
+ dcn3_init_single_clock(clk_mgr, PPCLK_PHYCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].phyclk_mhz,
+ &num_levels);
+
+ /* Get UCLK, update bounding box */
+ clk_mgr_base->funcs->get_memclk_states_from_smu(clk_mgr_base);
+
+ /* WM range table */
+ dcn3_build_wm_range_table(clk_mgr);
+}
+
+static int dcn30_get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
+{
+ struct fixed31_32 pll_req;
+ /* get FbMult value */
+ uint32_t pll_req_reg = REG_READ(CLK0_CLK_PLL_REQ);
+
+ /* set up a fixed-point number
+ * this works because the int part is on the right edge of the register
+ * and the frac part is on the left edge
+ */
+ pll_req = dc_fixpt_from_int(pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_int);
+ pll_req.value |= pll_req_reg & clk_mgr->clk_mgr_mask->FbMult_frac;
+
+ /* multiply by REFCLK period */
+ pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);
+
+ return dc_fixpt_floor(pll_req);
+}
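An approximation of the 32.32 fixed-point arithmetic used above, in standalone C; the feedback multiplier, fraction width, and 100 MHz refclk are example values chosen to land on the 3.65 GHz default, not values read from hardware:

#include <stdint.h>
#include <stdio.h>

#define FIXPT_FRAC_BITS 32

/* Multiply a Q32.32 value by an integer, then take the floor. */
static uint64_t fixpt_mul_int_floor(uint64_t fixpt, uint32_t m)
{
	return (fixpt * m) >> FIXPT_FRAC_BITS;
}

int main(void)
{
	uint32_t fb_mult_int  = 0x24;	  /* example integer feedback multiplier */
	uint32_t fb_mult_frac = 0x8000;	  /* example fraction, assuming 16 fractional bits */
	uint32_t dfs_ref_freq_khz = 100000;

	/* Build the Q32.32 multiplier: integer part plus scaled fraction. */
	uint64_t pll_req = ((uint64_t)fb_mult_int << FIXPT_FRAC_BITS) |
			   ((uint64_t)fb_mult_frac << (FIXPT_FRAC_BITS - 16));

	printf("VCO ~= %llu kHz\n",
	       (unsigned long long)fixpt_mul_int_floor(pll_req, dfs_ref_freq_khz));
	return 0;
}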
+
+static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ int display_count;
+ bool update_dppclk = false;
+ bool update_dispclk = false;
+ bool enter_display_off = false;
+ bool dpp_clock_lowered = false;
+ struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
+ bool force_reset = false;
+ bool update_uclk = false;
+
+ if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present)
+ return;
+
+ if (clk_mgr_base->clks.dispclk_khz == 0 ||
+ (dc->debug.force_clock_mode & 0x1)) {
+ /* This is from resume or boot up; if the forced_clock cfg option is used, we bypass programming dispclk and DPPCLK but still need to set them for S3. */
+ force_reset = true;
+
+ dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
+
+ /* force_clock_mode 0x1: force reset the clock even if it is the same clock, as long as it is in the Passive level. */
+ }
+ display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
+
+ if (display_count == 0)
+ enter_display_off = true;
+
+ if (enter_display_off == safe_to_lower)
+ dcn30_smu_set_num_of_displays(clk_mgr, display_count);
+
+ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz)) {
+ clk_mgr_base->clks.phyclk_khz = new_clocks->phyclk_khz;
+ dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_PHYCLK, clk_mgr_base->clks.phyclk_khz / 1000);
+ }
+
+ if (dc->debug.force_min_dcfclk_mhz > 0)
+ new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
+ new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
+
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
+ clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+ dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DCEFCLK, clk_mgr_base->clks.dcfclk_khz / 1000);
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
+ clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+ dcn30_smu_set_min_deep_sleep_dcef_clk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz / 1000);
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr_base->clks.socclk_khz))
+ /* We don't actually care about socclk, don't notify SMU of hard min */
+ clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
+
+ clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
+ if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+ clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support;
+
+ /* to disable P-State switching, set UCLK min = max */
+ if (!clk_mgr_base->clks.p_state_change_support)
+ dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
+ clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+ }
+
+ /* Always update the saved value, even if the new value is not applied because P-State switching is unsupported */
+ if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) {
+ clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;
+ update_uclk = true;
+ }
+
+ /* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
+ if (clk_mgr_base->clks.p_state_change_support &&
+ (update_uclk || !clk_mgr_base->clks.prev_p_state_change_support))
+ dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, clk_mgr_base->clks.dramclk_khz / 1000);
+
+ if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr_base->clks.dppclk_khz)) {
+ if (clk_mgr_base->clks.dppclk_khz > new_clocks->dppclk_khz)
+ dpp_clock_lowered = true;
+
+ clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
+ dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_PIXCLK, clk_mgr_base->clks.dppclk_khz / 1000);
+ update_dppclk = true;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+ clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+ dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000);
+ update_dispclk = true;
+ }
+
+ if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
+ if (dpp_clock_lowered) {
+ /* if clock is being lowered, increase DTO before lowering refclk */
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ dcn20_update_clocks_update_dentist(clk_mgr);
+ } else {
+ /* if clock is being raised, increase refclk before lowering DTO */
+ if (update_dppclk || update_dispclk)
+ dcn20_update_clocks_update_dentist(clk_mgr);
+ /* always update dtos unless clock is lowered and not safe to lower */
+ if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ }
+ }
+
+ if (update_dispclk && dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
+ /*update dmcu for wait_loop count*/
+ dmcu->funcs->set_psr_wait_loop(dmcu,
+ clk_mgr_base->clks.dispclk_khz / 1000 / 7);
+}
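The UCLK handling above disables P-State switching by pinning the hard minimum to the top DPM level, and re-enables it by dropping the minimum back to the requested DRAM clock. A standalone sketch of just that decision; the set_hard_min_uclk_mhz() stub is hypothetical, and the max DPM entry would come from bw_params->clk_table:

#include <stdbool.h>
#include <stdio.h>

static void set_hard_min_uclk_mhz(unsigned int mhz)
{
	printf("UCLK hard min -> %u MHz\n", mhz);
}

static void apply_uclk_policy(bool p_state_change_support,
			      bool prev_p_state_change_support,
			      bool update_uclk,
			      unsigned int dramclk_khz,
			      unsigned int max_memclk_mhz)
{
	/* To disable P-State switching, pin UCLK min to the max DPM level. */
	if (!p_state_change_support)
		set_hard_min_uclk_mhz(max_memclk_mhz);

	/* Otherwise follow the requested DRAM clock, or re-enable switching. */
	if (p_state_change_support &&
	    (update_uclk || !prev_p_state_change_support))
		set_hard_min_uclk_mhz(dramclk_khz / 1000);
}

int main(void)
{
	apply_uclk_policy(false, true, true, 800000, 1000);	/* pin to max DPM */
	apply_uclk_policy(true, false, false, 800000, 1000);	/* re-enable, 800 MHz */
	return 0;
}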
+
+
+static void dcn3_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
+{
+ unsigned int i;
+ long long table_addr;
+ WatermarksExternal_t *table;
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ if (!clk_mgr->smu_present)
+ return;
+
+ /* need physical address of table to give to PMFW */
+ table = (WatermarksExternal_t *) dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t), &table_addr);
+
+ if (!table)
+ // should log failure
+ return;
+
+ memset(table, 0, sizeof(*table));
+
+ /* collect valid ranges, place in pmfw table */
+ for (i = 0; i < WM_SET_COUNT; i++)
+ if (clk_mgr->base.bw_params->wm_table.nv_entries[i].valid) {
+ table->Watermarks.WatermarkRow[WM_DCEFCLK][i].MinClock = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.min_dcfclk;
+ table->Watermarks.WatermarkRow[WM_DCEFCLK][i].MaxClock = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.max_dcfclk;
+ table->Watermarks.WatermarkRow[WM_DCEFCLK][i].MinUclk = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.min_uclk;
+ table->Watermarks.WatermarkRow[WM_DCEFCLK][i].MaxUclk = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.max_uclk;
+ table->Watermarks.WatermarkRow[WM_DCEFCLK][i].WmSetting = i;
+ table->Watermarks.WatermarkRow[WM_DCEFCLK][i].Flags = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.wm_type;
+ }
+
+ dcn30_smu_set_dram_addr_high(clk_mgr, table_addr >> 32);
+ dcn30_smu_set_dram_addr_low(clk_mgr, table_addr & 0xFFFFFFFF);
+ dcn30_smu_transfer_wm_table_dram_2_smu(clk_mgr);
+
+ dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART, table);
+}
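The watermark hand-off above passes the table's GPU address to the SMU as two 32-bit halves. A trivial standalone illustration of that split; the address value is made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t table_addr = 0x0000000812345000ULL;	/* example GART address */
	uint32_t addr_high = (uint32_t)(table_addr >> 32);
	uint32_t addr_low  = (uint32_t)(table_addr & 0xFFFFFFFF);

	/* These two values correspond to the SetDalDramAddrHigh/Low messages. */
	printf("high=0x%08X low=0x%08X\n", addr_high, addr_low);
	return 0;
}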
+
+/* Set min memclk to minimum, either constrained by the current mode or DPM0 */
+static void dcn3_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool current_mode)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ if (!clk_mgr->smu_present)
+ return;
+
+ if (current_mode)
+ dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
+ clk_mgr_base->clks.dramclk_khz / 1000);
+ else
+ dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
+ clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz);
+}
+
+/* Set max memclk to highest DPM value */
+static void dcn3_set_hard_max_memclk(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ if (!clk_mgr->smu_present)
+ return;
+
+ dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK,
+ clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+}
+
+/* Get current memclk states, update bounding box */
+static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ unsigned int num_levels;
+
+ if (!clk_mgr->smu_present)
+ return;
+
+ /* Refresh memclk states */
+ dcn3_init_single_clock(clk_mgr, PPCLK_UCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz,
+ &num_levels);
+ clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
+
+ /* Refresh bounding box */
+ clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
+ clk_mgr->base.ctx->dc, clk_mgr_base->bw_params);
+}
+
+static bool dcn3_are_clock_states_equal(struct dc_clocks *a,
+ struct dc_clocks *b)
+{
+ if (a->dispclk_khz != b->dispclk_khz)
+ return false;
+ else if (a->dppclk_khz != b->dppclk_khz)
+ return false;
+ else if (a->dcfclk_khz != b->dcfclk_khz)
+ return false;
+ else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
+ return false;
+ else if (a->phyclk_khz != b->phyclk_khz)
+ return false;
+ else if (a->dramclk_khz != b->dramclk_khz)
+ return false;
+ else if (a->p_state_change_support != b->p_state_change_support)
+ return false;
+
+ return true;
+}
+
+static void dcn3_enable_pme_wa(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ if (!clk_mgr->smu_present)
+ return;
+
+ dcn30_smu_set_pme_workaround(clk_mgr);
+}
+
+static struct clk_mgr_funcs dcn3_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .update_clocks = dcn3_update_clocks,
+ .init_clocks = dcn3_init_clocks,
+ .notify_wm_ranges = dcn3_notify_wm_ranges,
+ .set_hard_min_memclk = dcn3_set_hard_min_memclk,
+ .set_hard_max_memclk = dcn3_set_hard_max_memclk,
+ .get_memclk_states_from_smu = dcn3_get_memclk_states_from_smu,
+ .are_clock_states_equal = dcn3_are_clock_states_equal,
+ .enable_pme_wa = dcn3_enable_pme_wa
+};
+
+static void dcn3_init_clocks_fpga(struct clk_mgr *clk_mgr)
+{
+ dcn2_init_clocks(clk_mgr);
+
+/* TODO: Implement the functions and remove the ifndef guard */
+}
+
+static struct clk_mgr_funcs dcn3_fpga_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .update_clocks = dcn2_update_clocks_fpga,
+ .init_clocks = dcn3_init_clocks_fpga,
+};
+
+/* TODO: dcn30 clk register offsets */
+void dcn3_clk_mgr_construct(
+ struct dc_context *ctx,
+ struct clk_mgr_internal *clk_mgr,
+ struct pp_smu_funcs *pp_smu,
+ struct dccg *dccg)
+{
+ clk_mgr->base.ctx = ctx;
+ clk_mgr->base.funcs = &dcn3_funcs;
+ clk_mgr->regs = &clk_mgr_regs;
+ clk_mgr->clk_mgr_shift = &clk_mgr_shift;
+ clk_mgr->clk_mgr_mask = &clk_mgr_mask;
+
+ clk_mgr->dccg = dccg;
+ clk_mgr->dfs_bypass_disp_clk = 0;
+
+ clk_mgr->dprefclk_ss_percentage = 0;
+ clk_mgr->dprefclk_ss_divider = 1000;
+ clk_mgr->ss_on_dprefclk = false;
+ clk_mgr->dfs_ref_freq_khz = 100000;
+
+ clk_mgr->base.dprefclk_khz = 730000; // 700 MHz planned if VCO is 3.85 GHz, will be retrieved
+
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
+ clk_mgr->base.funcs = &dcn3_fpga_funcs;
+ clk_mgr->base.dentist_vco_freq_khz = 3650000;
+
+ } else {
+ struct clk_state_registers_and_bypass s = { 0 };
+
+ /* integer part is now VCO frequency in kHz */
+ clk_mgr->base.dentist_vco_freq_khz = dcn30_get_vco_frequency_from_reg(clk_mgr);
+
+ /* in case we don't get a value from the register, use default */
+ if (clk_mgr->base.dentist_vco_freq_khz == 0)
+ clk_mgr->base.dentist_vco_freq_khz = 3650000;
+ /* Convert dprefclk units from MHz to KHz */
+ /* Value already divided by 10, some resolution lost */
+
+ /*TODO: uncomment assert once dcn3_dump_clk_registers is implemented */
+ //ASSERT(s.dprefclk != 0);
+ if (s.dprefclk != 0)
+ clk_mgr->base.dprefclk_khz = s.dprefclk * 1000;
+ }
+
+ clk_mgr->dfs_bypass_enabled = false;
+
+ clk_mgr->smu_present = false;
+
+ dce_clock_read_ss_info(clk_mgr);
+
+ clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
+}
+
+void dcn3_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
+{
+ if (clk_mgr->base.bw_params)
+ kfree(clk_mgr->base.bw_params);
+}
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.h
index b6deb8e2590f..dd4a0bd72458 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2019 Advanced Micro Devices, Inc.
+ * Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -23,19 +23,16 @@
*
*/
-#ifndef _DMUB_CMD_VBIOS_H_
-#define _DMUB_CMD_VBIOS_H_
+#ifndef __DCN30_CLK_MGR_H__
+#define __DCN30_CLK_MGR_H__
-/*
- * Command IDs should be treated as stable ABI.
- * Do not reuse or modify IDs.
- */
+void dcn3_init_clocks(struct clk_mgr *clk_mgr_base);
+
+void dcn3_clk_mgr_construct(struct dc_context *ctx,
+ struct clk_mgr_internal *clk_mgr,
+ struct pp_smu_funcs *pp_smu,
+ struct dccg *dccg);
-enum dmub_cmd_vbios_type {
- DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL = 0,
- DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL = 1,
- DMUB_CMD__VBIOS_SET_PIXEL_CLOCK = 2,
- DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING = 3,
-};
+void dcn3_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr);
-#endif /* _DMUB_CMD_VBIOS_H_ */
+#endif //__DCN30_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
new file mode 100644
index 000000000000..986c53a3b6a8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/delay.h>
+#include "dcn30_clk_mgr_smu_msg.h"
+
+#include "clk_mgr_internal.h"
+#include "reg_helper.h"
+#include "dalsmc.h"
+
+#define mmDAL_MSG_REG 0x1628A
+#define mmDAL_ARG_REG 0x16273
+#define mmDAL_RESP_REG 0x16274
+
+#define REG(reg_name) \
+ mm ## reg_name
+
+/*
+ * Function to be used instead of REG_WAIT macro because the wait ends when
+ * the register is NOT EQUAL to zero, and because the translation in msg_if.h
+ * won't work with REG_WAIT.
+ */
+static uint32_t dcn30_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries)
+{
+ uint32_t reg = 0;
+
+ do {
+ reg = REG_READ(DAL_RESP_REG);
+ if (reg)
+ break;
+
+ if (delay_us >= 1000)
+ msleep(delay_us/1000);
+ else if (delay_us > 0)
+ udelay(delay_us);
+ } while (max_retries--);
+
+ /* handle DALSMC_Result_CmdRejectedBusy? */
+
+ /* Log? */
+
+ return reg;
+}
+
+static bool dcn30_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uint32_t msg_id, uint32_t param_in, uint32_t *param_out)
+{
+ /* Wait for response register to be ready */
+ dcn30_smu_wait_for_response(clk_mgr, 10, 200000);
+
+ /* Clear response register */
+ REG_WRITE(DAL_RESP_REG, 0);
+
+ /* Set the parameter register for the SMU message */
+ REG_WRITE(DAL_ARG_REG, param_in);
+
+ /* Trigger the message transaction by writing the message ID */
+ REG_WRITE(DAL_MSG_REG, msg_id);
+
+ /* Wait for response */
+ if (dcn30_smu_wait_for_response(clk_mgr, 10, 200000) == DALSMC_Result_OK) {
+ if (param_out)
+ *param_out = REG_READ(DAL_ARG_REG);
+
+ return true;
+ }
+
+ return false;
+}
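A standalone sketch of the same request/response handshake against mocked registers; the mock completes every message immediately, the register names only mirror the macros above, and nothing here touches real hardware:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RESULT_OK 0x1

/* Mock register file: argument, message, response. */
static uint32_t reg_arg, reg_msg, reg_resp;

static void mock_smu_complete(void)
{
	/* Pretend the SMU handled TestMessage by echoing arg + 1, then set OK. */
	if (reg_msg == 0x1 /* DALSMC_MSG_TestMessage */)
		reg_arg += 1;
	reg_resp = RESULT_OK;
}

static bool send_msg_with_param(uint32_t msg_id, uint32_t param_in,
				uint32_t *param_out)
{
	reg_resp = 0;			/* clear response register */
	reg_arg = param_in;		/* set message argument */
	reg_msg = msg_id;		/* writing the ID triggers the message */

	mock_smu_complete();		/* stand-in for polling the response */

	if (reg_resp != RESULT_OK)
		return false;
	if (param_out)
		*param_out = reg_arg;	/* response payload comes back in arg */
	return true;
}

int main(void)
{
	uint32_t out = 0;

	if (send_msg_with_param(0x1 /* TestMessage */, 41, &out))
		printf("test message returned %u\n", out);	/* expect 42 */
	return 0;
}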
+
+/* Test message should return input + 1 */
+bool dcn30_smu_test_message(struct clk_mgr_internal *clk_mgr, uint32_t input)
+{
+ uint32_t response = 0;
+
+ if (dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_TestMessage, input, &response))
+ if (response == input + 1)
+ return true;
+
+ return false;
+}
+
+bool dcn30_smu_get_smu_version(struct clk_mgr_internal *clk_mgr, unsigned int *version)
+{
+ if (dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetSmuVersion, 0, version))
+ return true;
+
+ return false;
+}
+
+/* Message output should match SMU11_DRIVER_IF_VERSION in smu11_driver_if.h */
+bool dcn30_smu_check_driver_if_version(struct clk_mgr_internal *clk_mgr)
+{
+ uint32_t response = 0;
+
+ if (dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetDriverIfVersion, 0, &response))
+ if (response == SMU11_DRIVER_IF_VERSION)
+ return true;
+
+ return false;
+}
+
+/* Message output should match DALSMC_VERSION in dalsmc.h */
+bool dcn30_smu_check_msg_header_version(struct clk_mgr_internal *clk_mgr)
+{
+ uint32_t response = 0;
+
+ if (dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetMsgHeaderVersion, 0, &response))
+ if (response == DALSMC_VERSION)
+ return true;
+
+ return false;
+}
+
+void dcn30_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high)
+{
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SetDalDramAddrHigh, addr_high, NULL);
+}
+
+void dcn30_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low)
+{
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SetDalDramAddrLow, addr_low, NULL);
+}
+
+void dcn30_smu_transfer_wm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr)
+{
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_TransferTableSmu2Dram, TABLE_WATERMARKS, NULL);
+}
+
+void dcn30_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
+{
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_TransferTableDram2Smu, TABLE_WATERMARKS, NULL);
+}
+
+/* Returns the actual frequency that was set in MHz, 0 on failure */
+unsigned int dcn30_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, uint16_t freq_mhz)
+{
+ uint32_t response = 0;
+
+ /* bits 23:16 for clock type, lower 16 bits for frequency in MHz */
+ uint32_t param = (clk << 16) | freq_mhz;
+
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SetHardMinByFreq, param, &response);
+
+ return response;
+}
+
+/* Returns the actual frequency that was set in MHz, 0 on failure */
+unsigned int dcn30_smu_set_hard_max_by_freq(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, uint16_t freq_mhz)
+{
+ uint32_t response = 0;
+
+ /* bits 23:16 for clock type, lower 16 bits for frequency in MHz */
+ uint32_t param = (clk << 16) | freq_mhz;
+
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SetHardMaxByFreq, param, &response);
+
+ return response;
+}
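The hard min/max messages above pack the clock selector and the frequency into one 32-bit parameter. A standalone pack/unpack sketch of that layout:

#include <stdint.h>
#include <stdio.h>

/* bits 23:16 carry the PPCLK_e selector, bits 15:0 the frequency in MHz */
static uint32_t pack_clk_param(uint8_t clk, uint16_t freq_mhz)
{
	return ((uint32_t)clk << 16) | freq_mhz;
}

int main(void)
{
	uint32_t param = pack_clk_param(9 /* e.g. PPCLK_DISPCLK */, 1200);

	printf("param=0x%08X clk=%u freq=%u MHz\n",
	       param, (param >> 16) & 0xFF, param & 0xFFFF);
	return 0;
}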
+
+/*
+ * Frequency in MHz returned in lower 16 bits for valid DPM level
+ *
+ * Call with dpm_level = 0xFF to query features, return value will be:
+ * Bits 7:0 - number of DPM levels
+ * Bit 28 - 1 = auto DPM on
+ * Bit 29 - 1 = sweep DPM on
+ * Bit 30 - 1 = forced DPM on
+ * Bit 31 - 0 = discrete, 1 = fine-grained
+ *
+ * With fine-grained DPM, only min and max frequencies will be reported
+ *
+ * Returns 0 on failure
+ */
+unsigned int dcn30_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, uint8_t dpm_level)
+{
+ uint32_t response = 0;
+
+ /* bits 23:16 for clock type, lower 8 bits for DPM level */
+ uint32_t param = (clk << 16) | dpm_level;
+
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetDpmFreqByIndex, param, &response);
+
+ return response;
+}
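A standalone sketch of decoding the feature-query form of this message (dpm_level = 0xFF), following the bit layout documented in the comment above; the sample return value is invented:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ret = 0x80000002;	/* invented: fine-grained DPM, 2 levels */

	printf("levels:       %u\n", ret & 0xFF);
	printf("auto DPM:     %u\n", (ret >> 28) & 1);
	printf("sweep DPM:    %u\n", (ret >> 29) & 1);
	printf("forced DPM:   %u\n", (ret >> 30) & 1);
	printf("fine-grained: %u\n", (ret >> 31) & 1);
	return 0;
}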
+
+/* Returns the max DPM frequency in DC mode in MHz, 0 on failure */
+unsigned int dcn30_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, PPCLK_e clk)
+{
+ uint32_t response = 0;
+
+ /* bits 23:16 for clock type */
+ uint32_t param = clk << 16;
+
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetDcModeMaxDpmFreq, param, &response);
+
+ return response;
+}
+
+void dcn30_smu_set_min_deep_sleep_dcef_clk(struct clk_mgr_internal *clk_mgr, uint32_t freq_mhz)
+{
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SetMinDeepSleepDcefclk, freq_mhz, NULL);
+}
+
+void dcn30_smu_set_num_of_displays(struct clk_mgr_internal *clk_mgr, uint32_t num_displays)
+{
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_NumOfDisplays, num_displays, NULL);
+}
+
+void dcn30_smu_set_external_client_df_cstate_allow(struct clk_mgr_internal *clk_mgr, bool enable)
+{
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SetExternalClientDfCstateAllow, enable ? 1 : 0, NULL);
+}
+
+void dcn30_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr)
+{
+ dcn30_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_BacoAudioD3PME, 0, NULL);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.h
new file mode 100644
index 000000000000..236f20ec90d4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DAL_DC_DCN30_CLK_MGR_SMU_MSG_H_
+#define DAL_DC_DCN30_CLK_MGR_SMU_MSG_H_
+
+#include "core_types.h"
+
+#define SMU11_DRIVER_IF_VERSION 0x1F
+
+typedef enum {
+ PPCLK_GFXCLK = 0,
+ PPCLK_SOCCLK,
+ PPCLK_UCLK,
+ PPCLK_FCLK,
+ PPCLK_DCLK_0,
+ PPCLK_VCLK_0,
+ PPCLK_DCLK_1,
+ PPCLK_VCLK_1,
+ PPCLK_DCEFCLK,
+ PPCLK_DISPCLK,
+ PPCLK_PIXCLK,
+ PPCLK_PHYCLK,
+ PPCLK_DTBCLK,
+ PPCLK_COUNT,
+} PPCLK_e;
+
+typedef struct {
+ uint16_t MinClock; // This is either DCEFCLK or SOCCLK (in MHz)
+ uint16_t MaxClock; // This is either DCEFCLK or SOCCLK (in MHz)
+ uint16_t MinUclk;
+ uint16_t MaxUclk;
+
+ uint8_t WmSetting;
+ uint8_t Flags;
+ uint8_t Padding[2];
+
+} WatermarkRowGeneric_t;
+
+#define NUM_WM_RANGES 4
+
+typedef enum {
+ WM_SOCCLK = 0,
+ WM_DCEFCLK,
+ WM_COUNT,
+} WM_CLOCK_e;
+
+typedef enum {
+ WATERMARKS_CLOCK_RANGE = 0,
+ WATERMARKS_DUMMY_PSTATE,
+ WATERMARKS_COUNT,
+} WATERMARKS_FLAGS_e;
+
+typedef struct {
+ // Watermarks
+ WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES];
+} Watermarks_t;
+
+typedef struct {
+ Watermarks_t Watermarks;
+
+ uint32_t MmHubPadding[8]; // SMU internal use
+} WatermarksExternal_t;
+
+#define TABLE_WATERMARKS 1
+
+struct clk_mgr_internal;
+
+bool dcn30_smu_test_message(struct clk_mgr_internal *clk_mgr, uint32_t input);
+bool dcn30_smu_get_smu_version(struct clk_mgr_internal *clk_mgr, unsigned int *version);
+bool dcn30_smu_check_driver_if_version(struct clk_mgr_internal *clk_mgr);
+bool dcn30_smu_check_msg_header_version(struct clk_mgr_internal *clk_mgr);
+void dcn30_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high);
+void dcn30_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low);
+void dcn30_smu_transfer_wm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr);
+void dcn30_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
+unsigned int dcn30_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, uint16_t freq_mhz);
+unsigned int dcn30_smu_set_hard_max_by_freq(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, uint16_t freq_mhz);
+unsigned int dcn30_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, PPCLK_e clk, uint8_t dpm_level);
+unsigned int dcn30_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, PPCLK_e clk);
+void dcn30_smu_set_min_deep_sleep_dcef_clk(struct clk_mgr_internal *clk_mgr, uint32_t freq_mhz);
+void dcn30_smu_set_num_of_displays(struct clk_mgr_internal *clk_mgr, uint32_t num_displays);
+void dcn30_smu_set_external_client_df_cstate_allow(struct clk_mgr_internal *clk_mgr, bool enable);
+void dcn30_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr);
+
+#endif /* DAL_DC_DCN30_CLK_MGR_SMU_MSG_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 6f93a6ca4cf0..67402d75e67e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -68,6 +68,8 @@
#include "dmub/dmub_srv.h"
+#include "dce/dmub_hw_lock_mgr.h"
+
#define CTX \
dc->ctx
@@ -186,9 +188,10 @@ static bool create_links(
bool should_destory_link = false;
if (link->connector_signal == SIGNAL_TYPE_EDP) {
- if (dc->config.edp_not_connected)
- should_destory_link = true;
- else if (dc->debug.remove_disconnect_edp) {
+ if (dc->config.edp_not_connected) {
+ if (!IS_DIAG_DC(dc->ctx->dce_environment))
+ should_destory_link = true;
+ } else {
enum dc_connection_type type;
dc_link_detect_sink(link, &type);
if (type == dc_connection_none)
@@ -728,6 +731,9 @@ static bool dc_construct(struct dc *dc,
dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
if (!dc->clk_mgr)
goto fail;
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
+#endif
if (dc->res_pool->funcs->update_bw_bounding_box)
dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
@@ -1244,7 +1250,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
int i, k, l;
struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
- disable_dangling_plane(dc, context);
for (i = 0; i < context->stream_count; i++)
dc_streams[i] = context->streams[i];
@@ -1260,6 +1265,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (dc->optimize_seamless_boot_streams == 0)
dc->hwss.prepare_bandwidth(dc, context);
+ disable_dangling_plane(dc, context);
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
*/
@@ -1382,6 +1388,43 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
return (result == DC_OK);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+bool dc_acquire_release_mpc_3dlut(
+ struct dc *dc, bool acquire,
+ struct dc_stream_state *stream,
+ struct dc_3dlut **lut,
+ struct dc_transfer_func **shaper)
+{
+ int pipe_idx;
+ bool ret = false;
+ bool found_pipe_idx = false;
+ const struct resource_pool *pool = dc->res_pool;
+ struct resource_context *res_ctx = &dc->current_state->res_ctx;
+ int mpcc_id = 0;
+
+ if (pool && res_ctx) {
+ if (acquire) {
+ /*find pipe idx for the given stream*/
+ for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
+ if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
+ found_pipe_idx = true;
+ mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
+ break;
+ }
+ }
+ } else
+ found_pipe_idx = true;/*for release pipe_idx is not required*/
+
+ if (found_pipe_idx) {
+ if (acquire && pool->funcs->acquire_post_bldn_3dlut)
+ ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
+ else if (acquire == false && pool->funcs->release_post_bldn_3dlut)
+ ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
+ }
+ }
+ return ret;
+}
+#endif
static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
{
int i;
@@ -2280,9 +2323,22 @@ static void commit_planes_for_stream(struct dc *dc,
}
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
- if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable)
- top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
- top_pipe_to_program->stream_res.tg);
+ if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
+ if (stream && should_use_dmub_lock(stream->link)) {
+ union dmub_hw_lock_flags hw_locks = { 0 };
+ struct dmub_hw_lock_inst_flags inst_flags = { 0 };
+
+ hw_locks.bits.lock_dig = 1;
+ inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
+
+ dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
+ true,
+ &hw_locks,
+ &inst_flags);
+ } else
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
+ top_pipe_to_program->stream_res.tg);
+ }
if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
dc->hwss.interdependent_update_lock(dc, context, true);
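When DMUB should own the lock, the change above mirrors the lock_doublebuffer_enable/disable pair with a lock/unlock command. A standalone sketch of that pattern; the structures and helper here are simplified stand-ins, not the real dmub_hw_lock_mgr API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the dmub lock flag/instance structures. */
struct hw_lock_flags { unsigned int lock_dig : 1; };
struct hw_lock_inst_flags { uint8_t dig_inst; };

static void hw_lock_mgr_cmd(bool lock, const struct hw_lock_flags *flags,
			    const struct hw_lock_inst_flags *inst)
{
	if (flags->lock_dig)
		printf("%s DIG%u double-buffer lock via DMUB\n",
		       lock ? "acquire" : "release", inst->dig_inst);
}

int main(void)
{
	struct hw_lock_flags hw_locks = { 0 };
	struct hw_lock_inst_flags inst_flags = { 0 };

	hw_locks.lock_dig = 1;
	inst_flags.dig_inst = 0;	/* timing generator instance */

	hw_lock_mgr_cmd(true, &hw_locks, &inst_flags);	/* before programming */
	/* ... double-buffered register programming would happen here ... */
	hw_lock_mgr_cmd(false, &hw_locks, &inst_flags);	/* after VACTIVE wait */
	return 0;
}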
@@ -2452,7 +2508,20 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
top_pipe_to_program->stream_res.tg,
CRTC_STATE_VACTIVE);
- top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
+
+ if (stream && should_use_dmub_lock(stream->link)) {
+ union dmub_hw_lock_flags hw_locks = { 0 };
+ struct dmub_hw_lock_inst_flags inst_flags = { 0 };
+
+ hw_locks.bits.lock_dig = 1;
+ inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
+
+ dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
+ false,
+ &hw_locks,
+ &inst_flags);
+ } else
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
top_pipe_to_program->stream_res.tg);
}
@@ -2612,6 +2681,7 @@ void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
dal_irq_service_ack(dc->res_pool->irqs, src);
}
+
void dc_set_power_state(
struct dc *dc,
enum dc_acpi_cm_power_state power_state)
@@ -2623,9 +2693,6 @@ void dc_set_power_state(
case DC_ACPI_CM_POWER_STATE_D0:
dc_resource_state_construct(dc, dc->current_state);
- if (dc->ctx->dmub_srv)
- dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
-
dc->hwss.init_hw(dc);
if (dc->hwss.init_sys_ctx != NULL &&
@@ -2840,3 +2907,51 @@ void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_
if (dc->hwss.get_clock)
dc->hwss.get_clock(dc, clock_type, clock_cfg);
}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+
+void dc_allow_idle_optimizations(struct dc *dc, bool allow)
+{
+ if (dc->debug.disable_idle_power_optimizations)
+ return;
+
+ if (allow == dc->idle_optimizations_allowed)
+ return;
+
+ if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
+ dc->idle_optimizations_allowed = allow;
+}
+
+/*
+ * blank all streams, and set min and max memory clock to
+ * lowest and highest DPM level, respectively
+ */
+void dc_unlock_memory_clock_frequency(struct dc *dc)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_PIPES; i++)
+ if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
+ core_link_disable_stream(&dc->current_state->res_ctx.pipe_ctx[i]);
+
+ dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
+ dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
+}
+
+/*
+ * set min memory clock to the min required for current mode,
+ * max to maxDPM, and unblank streams
+ */
+void dc_lock_memory_clock_frequency(struct dc *dc)
+{
+ unsigned int i;
+
+ dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
+ dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
+ dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
+
+ for (i = 0; i < MAX_PIPES; i++)
+ if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
+ core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
+}
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 502ed3c7959d..87d89449b9af 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -365,3 +365,62 @@ void context_clock_trace(
context->bw_ctx.bw.dcn.clk.socclk_khz);
#endif
}
+
+/**
+ * dc_status_to_str - convert dc_status to a human readable string
+ * @status: dc_status to be converted
+ *
+ * Return:
+ * A string describing the DC status.
+ */
+char *dc_status_to_str(enum dc_status status)
+{
+ switch (status) {
+ case DC_OK:
+ return "DC OK";
+ case DC_NO_CONTROLLER_RESOURCE:
+ return "No controller resource";
+ case DC_NO_STREAM_ENC_RESOURCE:
+ return "No stream encoder";
+ case DC_NO_CLOCK_SOURCE_RESOURCE:
+ return "No clock source";
+ case DC_FAIL_CONTROLLER_VALIDATE:
+ return "Controller validation failure";
+ case DC_FAIL_ENC_VALIDATE:
+ return "Encoder validation failure";
+ case DC_FAIL_ATTACH_SURFACES:
+ return "Surfaces attachment failure";
+ case DC_FAIL_DETACH_SURFACES:
+ return "Surfaces detachment failure";
+ case DC_FAIL_SURFACE_VALIDATE:
+ return "Surface validation failure";
+ case DC_NO_DP_LINK_BANDWIDTH:
+ return "No DP link bandwidth";
+ case DC_EXCEED_DONGLE_CAP:
+ return "Exceed dongle capability";
+ case DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED:
+ return "Unsupported pixel format";
+ case DC_FAIL_BANDWIDTH_VALIDATE:
+ return "Bandwidth validation failure (BW and Watermark)";
+ case DC_FAIL_SCALING:
+ return "Scaling failure";
+ case DC_FAIL_DP_LINK_TRAINING:
+ return "DP link training failure";
+ case DC_FAIL_DSC_VALIDATE:
+ return "DSC validation failure";
+ case DC_NO_DSC_RESOURCE:
+ return "No DSC resource";
+ case DC_FAIL_UNSUPPORTED_1:
+ return "Unsupported";
+ case DC_FAIL_CLK_EXCEED_MAX:
+ return "Clk exceed max failure";
+ case DC_FAIL_CLK_BELOW_MIN:
+ return "Fail clk below minimum";
+ case DC_FAIL_CLK_BELOW_CFG_REQUIRED:
+ return "Fail clk below required CFG (hard_min in PPLIB)";
+ case DC_ERROR_UNEXPECTED:
+ return "Unexpected error";
+ }
+
+ return "Unexpected status error";
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 48ab51533d5d..02742cca4d84 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -521,11 +521,11 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin
}
#if defined(CONFIG_DRM_AMD_DC_HDCP)
-bool dc_link_is_hdcp14(struct dc_link *link)
+bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal)
{
bool ret = false;
- switch (link->connector_signal) {
+ switch (signal) {
case SIGNAL_TYPE_DISPLAY_PORT:
case SIGNAL_TYPE_DISPLAY_PORT_MST:
ret = link->hdcp_caps.bcaps.bits.HDCP_CAPABLE;
@@ -545,11 +545,11 @@ bool dc_link_is_hdcp14(struct dc_link *link)
return ret;
}
-bool dc_link_is_hdcp22(struct dc_link *link)
+bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal)
{
bool ret = false;
- switch (link->connector_signal) {
+ switch (signal) {
case SIGNAL_TYPE_DISPLAY_PORT:
case SIGNAL_TYPE_DISPLAY_PORT_MST:
ret = (link->hdcp_caps.bcaps.bits.HDCP_CAPABLE &&
@@ -690,12 +690,8 @@ static bool detect_dp(struct dc_link *link,
if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
-
- dpcd_set_source_specific_data(link);
-
if (!detect_dp_sink_caps(link))
return false;
-
if (is_mst_supported(link)) {
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
link->type = dc_connection_mst_branch;
@@ -858,6 +854,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
bool same_dpcd = true;
enum dc_connection_type new_connection_type = dc_connection_none;
bool perform_dp_seamless_boot = false;
+ const uint32_t post_oui_delay = 30; // 30ms
DC_LOGGER_INIT(link->ctx->logger);
@@ -870,6 +867,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
// need to re-write OUI and brightness in resume case
if (link->connector_signal == SIGNAL_TYPE_EDP) {
dpcd_set_source_specific_data(link);
+ msleep(post_oui_delay);
dc_link_set_default_brightness_aux(link);
//TODO: use cached
}
@@ -925,8 +923,6 @@ static bool dc_link_detect_helper(struct dc_link *link,
case SIGNAL_TYPE_EDP: {
read_current_link_settings_on_detect(link);
- dpcd_set_source_specific_data(link);
-
detect_edp_sink_caps(link);
read_current_link_settings_on_detect(link);
sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
@@ -1636,6 +1632,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
int i;
bool apply_seamless_boot_optimization = false;
uint32_t bl_oled_enable_delay = 50; // in ms
+ const uint32_t post_oui_delay = 30; // 30ms
// check for seamless boot
for (i = 0; i < state->stream_count; i++) {
@@ -1662,6 +1659,8 @@ static enum dc_status enable_link_dp(struct dc_state *state,
// during mode switch we do DP_SET_POWER off then on, and OUI is lost
dpcd_set_source_specific_data(link);
+ if (link->dpcd_sink_ext_caps.raw != 0)
+ msleep(post_oui_delay);
skip_video_pattern = true;
@@ -2560,12 +2559,14 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool
struct dmcu *dmcu = dc->res_pool->dmcu;
struct dmub_psr *psr = dc->res_pool->psr;
+ link->psr_settings.psr_allow_active = allow_active;
+
if (psr != NULL && link->psr_settings.psr_feature_enabled)
psr->funcs->psr_enable(psr, allow_active);
else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_settings.psr_feature_enabled)
dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
-
- link->psr_settings.psr_allow_active = allow_active;
+ else
+ return false;
return true;
}
@@ -3134,6 +3135,11 @@ void core_link_enable_stream(
pipe_ctx->stream->link->link_state_valid = true;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ if (pipe_ctx->stream_res.tg->funcs->set_out_mux)
+ pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO);
+#endif
+
if (dc_is_dvi_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dvi_set_stream_attribute(
pipe_ctx->stream_res.stream_enc,
@@ -3216,6 +3222,15 @@ void core_link_enable_stream(
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
COLOR_DEPTH_UNDEFINED);
+ /* This second call is needed to reconfigure the DIG
+ * as a workaround for the incorrect value being applied
+ * from transmitter control.
+ */
+ if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
+ stream->link->link_enc->funcs->setup(
+ stream->link->link_enc,
+ pipe_ctx->stream->signal);
+
dc->hwss.enable_stream(pipe_ctx);
/* Set DPS PPS SDP (AKA "info frames") */
@@ -3298,9 +3313,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
write_i2c_redriver_setting(pipe_ctx, false);
}
}
- dc->hwss.disable_stream(pipe_ctx);
disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
+
+ dc->hwss.disable_stream(pipe_ctx);
+
if (pipe_ctx->stream->timing.flags.DSC) {
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_set_dsc_enable(pipe_ctx, false);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index aefd29a440b5..b984eecca58b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -503,7 +503,7 @@ bool dal_ddc_service_query_ddc_data(
uint8_t *read_buf,
uint32_t read_size)
{
- bool ret = false;
+ bool success = true;
uint32_t payload_size =
dal_ddc_service_is_in_aux_transaction_mode(ddc) ?
DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE;
@@ -527,7 +527,6 @@ bool dal_ddc_service_query_ddc_data(
* but we want to read 256 over i2c!!!!*/
if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
struct aux_payload payload;
- bool read_available = true;
payload.i2c_over_aux = true;
payload.address = address;
@@ -536,21 +535,26 @@ bool dal_ddc_service_query_ddc_data(
if (write_size != 0) {
payload.write = true;
- payload.mot = false;
+ /* should not set mot (middle of transaction) to 0
+ * if there are pending read payloads
+ */
+ payload.mot = read_size == 0 ? false : true;
payload.length = write_size;
payload.data = write_buf;
- ret = dal_ddc_submit_aux_command(ddc, &payload);
- read_available = ret;
+ success = dal_ddc_submit_aux_command(ddc, &payload);
}
- if (read_size != 0 && read_available) {
+ if (read_size != 0 && success) {
payload.write = false;
+ /* should set mot (middle of transaction) to 0
+ * since it is the last payload to send
+ */
payload.mot = false;
payload.length = read_size;
payload.data = read_buf;
- ret = dal_ddc_submit_aux_command(ddc, &payload);
+ success = dal_ddc_submit_aux_command(ddc, &payload);
}
} else {
struct i2c_command command = {0};
@@ -573,7 +577,7 @@ bool dal_ddc_service_query_ddc_data(
command.number_of_payloads =
dal_ddc_i2c_payloads_get_count(&payloads);
- ret = dm_helpers_submit_i2c(
+ success = dm_helpers_submit_i2c(
ddc->ctx,
ddc->link,
&command);
@@ -581,7 +585,7 @@ bool dal_ddc_service_query_ddc_data(
dal_ddc_i2c_payloads_destroy(&payloads);
}
- return ret;
+ return success;
}
bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
@@ -598,7 +602,7 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
do {
struct aux_payload current_payload;
- bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >
+ bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >=
payload->length;
current_payload.address = payload->address;
@@ -607,7 +611,10 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
current_payload.i2c_over_aux = payload->i2c_over_aux;
current_payload.length = is_end_of_payload ?
payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE;
- current_payload.mot = !is_end_of_payload;
+ /* keep mot (middle of transaction) set for intermediate chunks;
+ * the last chunk inherits the caller's mot value
+ */
+ current_payload.mot = is_end_of_payload ? payload->mot : true;
current_payload.reply = payload->reply;
current_payload.write = payload->write;
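A standalone sketch of the chunking/MOT policy changed above: intermediate chunks keep MOT (middle of transaction) set, and only the final chunk inherits the caller's MOT value. The 16-byte chunk size assumes the usual DEFAULT_AUX_MAX_DATA_SIZE, and the payload length is an example:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define AUX_CHUNK_SIZE 16	/* assumed DEFAULT_AUX_MAX_DATA_SIZE */

int main(void)
{
	uint32_t payload_length = 40;	/* example EDID-style read length */
	bool payload_mot = false;	/* caller's MOT for the transaction */
	uint32_t retrieved = 0;

	do {
		bool is_end = (retrieved + AUX_CHUNK_SIZE) >= payload_length;
		uint32_t len = is_end ? payload_length - retrieved
				      : AUX_CHUNK_SIZE;
		/* keep MOT set mid-transaction, inherit it on the last chunk */
		bool mot = is_end ? payload_mot : true;

		printf("chunk @%u len=%u mot=%d\n",
		       (unsigned)retrieved, (unsigned)len, mot);
		retrieved += len;
	} while (retrieved < payload_length);

	return 0;
}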
@@ -648,16 +655,17 @@ bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
}
-uint32_t dc_link_aux_configure_timeout(struct ddc_service *ddc,
+bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc,
uint32_t timeout)
{
- uint32_t prev_timeout = 0;
+ bool result = false;
struct ddc *ddc_pin = ddc->ddc_pin;
- if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout)
- prev_timeout =
- ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout);
- return prev_timeout;
+ if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout) {
+ ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout);
+ result = true;
+ }
+ return result;
}
/*test only function*/
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 91cd884d6f25..3d969f83b3cc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -12,6 +12,8 @@
#include "dc_link_ddc.h"
#include "core_status.h"
#include "dpcd_defs.h"
+#include "dc_dmub_srv.h"
+#include "dce/dmub_hw_lock_mgr.h"
#define DC_LOGGER \
link->ctx->logger
@@ -245,7 +247,7 @@ static uint8_t dc_dp_initialize_scrambling_data_symbols(
static inline bool is_repeater(struct dc_link *link, uint32_t offset)
{
- return (!link->is_lttpr_mode_transparent && offset != 0);
+ return (link->lttpr_non_transparent_mode && offset != 0);
}
static void dpcd_set_lt_pattern_and_lane_settings(
@@ -1038,7 +1040,7 @@ static enum link_training_result perform_clock_recovery_sequence(
/* 3. wait receiver to lock-on*/
wait_time_microsec = lt_settings->cr_pattern_time;
- if (!link->is_lttpr_mode_transparent)
+ if (link->lttpr_non_transparent_mode)
wait_time_microsec = TRAINING_AUX_RD_INTERVAL;
wait_for_training_aux_rd_interval(
@@ -1102,6 +1104,10 @@ static inline enum link_training_result perform_link_training_int(
dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
dpcd_set_training_pattern(link, dpcd_pattern);
+ /* delay 5ms after notifying sink of idle pattern before switching output */
+ if (link->connector_signal != SIGNAL_TYPE_EDP)
+ msleep(5);
+
/* 4. mainlink output idle pattern*/
dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
@@ -1268,7 +1274,7 @@ static void configure_lttpr_mode(struct dc_link *link)
link->dpcd_caps.lttpr_caps.mode = repeater_mode;
}
- if (!link->is_lttpr_mode_transparent) {
+ if (link->lttpr_non_transparent_mode) {
DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__);
@@ -1473,7 +1479,7 @@ enum link_training_result dc_link_dp_perform_link_training(
&lt_settings);
/* Configure lttpr mode */
- if (!link->is_lttpr_mode_transparent)
+ if (link->lttpr_non_transparent_mode)
configure_lttpr_mode(link);
if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
@@ -1489,7 +1495,7 @@ enum link_training_result dc_link_dp_perform_link_training(
dp_set_fec_ready(link, fec_enable);
- if (!link->is_lttpr_mode_transparent) {
+ if (link->lttpr_non_transparent_mode) {
/* 2. perform link training (set link training done
* to false is done as well)
@@ -1551,6 +1557,12 @@ bool perform_link_training_with_retries(
struct dc_link *link = stream->link;
enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
+ /* We need to do this before the link training to ensure the idle pattern in SST
+ * mode will be sent right after the link training
+ */
+ link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
+ pipe_ctx->stream_res.stream_enc->id, true);
+
for (j = 0; j < attempts; ++j) {
dp_enable_link_phy(
@@ -1567,12 +1579,6 @@ bool perform_link_training_with_retries(
dp_set_panel_mode(link, panel_mode);
- /* We need to do this before the link training to ensure the idle pattern in SST
- * mode will be sent right after the link training
- */
- link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
- pipe_ctx->stream_res.stream_enc->id, true);
-
if (link->aux_access_disabled) {
dc_link_dp_perform_link_training_skip_aux(link, link_setting);
return true;
@@ -1756,7 +1762,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
* account for lttpr repeaters cap
* notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3).
*/
- if (!link->is_lttpr_mode_transparent) {
+ if (link->lttpr_non_transparent_mode) {
if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
@@ -1914,7 +1920,7 @@ bool dp_verify_link_cap(
max_link_cap = get_max_link_cap(link);
/* Grant extended timeout request */
- if (!link->is_lttpr_mode_transparent && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) {
+ if (link->lttpr_non_transparent_mode && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) {
uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80;
core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant));
@@ -2849,7 +2855,6 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
enum dc_status result;
bool status = false;
struct pipe_ctx *pipe_ctx;
- struct dc_link_settings previous_link_settings;
int i;
if (out_link_loss)
@@ -2928,32 +2933,25 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
- link->dc->hwss.blank_stream(pipe_ctx);
- }
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
break;
}
if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
return false;
- previous_link_settings = link->cur_link_settings;
-
- perform_link_training_with_retries(&previous_link_settings,
- true, LINK_TRAINING_ATTEMPTS,
- pipe_ctx,
- pipe_ctx->stream->signal);
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
- dc_link_reallocate_mst_payload(link);
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
+ pipe_ctx->stream->link == link)
+ core_link_disable_stream(pipe_ctx);
+ }
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
- link->dc->hwss.unblank_stream(pipe_ctx, &previous_link_settings);
+ if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
+ pipe_ctx->stream->link == link)
+ core_link_enable_stream(link->dc->current_state, pipe_ctx);
}
status = false;
@@ -3255,17 +3253,8 @@ static bool retrieve_link_cap(struct dc_link *link)
uint32_t read_dpcd_retry_cnt = 3;
int i;
struct dp_sink_hw_fw_revision dp_hw_fw_revision;
-
- /* Set default timeout to 3.2ms and read LTTPR capabilities */
- bool ext_timeout_support = link->dc->caps.extended_aux_timeout_support &&
- !link->dc->config.disable_extended_timeout_support;
-
- link->is_lttpr_mode_transparent = true;
-
- if (ext_timeout_support) {
- dc_link_aux_configure_timeout(link->ddc,
- LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD);
- }
+ bool is_lttpr_present = false;
+ const uint32_t post_oui_delay = 30; // 30ms
memset(dpcd_data, '\0', sizeof(dpcd_data));
memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
@@ -3274,6 +3263,13 @@ static bool retrieve_link_cap(struct dc_link *link)
memset(&edp_config_cap, '\0',
sizeof(union edp_configuration_cap));
+ /* if extended timeout is supported in hardware,
+ * default to the LTTPR timeout (3.2ms) first as a W/A for a DP link layer
+ * CTS 4.2.1.1 regression introduced by a CTS spec requirement update.
+ */
+ dc_link_aux_try_to_configure_timeout(link->ddc,
+ LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
+
status = core_link_read_dpcd(link, DP_SET_POWER,
&dpcd_power_state, sizeof(dpcd_power_state));
@@ -3285,6 +3281,12 @@ static bool retrieve_link_cap(struct dc_link *link)
if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3)
udelay(1000);
+ dpcd_set_source_specific_data(link);
+ /* Sink may need to configure internals based on vendor, so allow some
+ * time before proceeding with possibly vendor specific transactions
+ */
+ msleep(post_oui_delay);
+
for (i = 0; i < read_dpcd_retry_cnt; i++) {
status = core_link_read_dpcd(
link,
@@ -3300,8 +3302,14 @@ static bool retrieve_link_cap(struct dc_link *link)
return false;
}
- if (ext_timeout_support) {
-
+ if (link->dc->caps.extended_aux_timeout_support &&
+ link->dc->config.allow_lttpr_non_transparent_mode) {
+ /* By reading the LTTPR capability, the RX assumes that we will enable
+ * LTTPR non-transparent mode if an LTTPR is present.
+ * Therefore, only query the LTTPR capability when both the LTTPR
+ * extended aux timeout and non-transparent mode are
+ * supported by hardware.
+ */
status = core_link_read_dpcd(
link,
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
@@ -3332,20 +3340,21 @@ static bool retrieve_link_cap(struct dc_link *link)
lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
- if (link->dpcd_caps.lttpr_caps.phy_repeater_cnt > 0 &&
+ is_lttpr_present = (link->dpcd_caps.lttpr_caps.phy_repeater_cnt > 0 &&
link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
- link->dpcd_caps.lttpr_caps.revision.raw >= 0x14) {
- link->is_lttpr_mode_transparent = false;
- } else {
- /*No lttpr reset timeout to its default value*/
- link->is_lttpr_mode_transparent = true;
- dc_link_aux_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
- }
-
- CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
+ link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
+ if (is_lttpr_present)
+ CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
}
+ /* decide whether to use LTTPR non-transparent mode */
+ link->lttpr_non_transparent_mode = is_lttpr_present;
+
+ if (!is_lttpr_present)
+ dc_link_aux_try_to_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
+
+
{
union training_aux_rd_interval aux_rd_interval;
@@ -3378,7 +3387,7 @@ static bool retrieve_link_cap(struct dc_link *link)
link->dpcd_caps.dpcd_rev.raw =
dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
- if (link->dpcd_caps.dpcd_rev.raw >= 0x14) {
+ if (link->dpcd_caps.ext_receiver_cap_field_present) {
for (i = 0; i < read_dpcd_retry_cnt; i++) {
status = core_link_read_dpcd(
link,
@@ -4034,9 +4043,23 @@ bool dc_link_dp_set_test_pattern(
break;
}
- if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable)
- pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable(
- pipe_ctx->stream_res.tg);
+ if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) {
+ if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) {
+ union dmub_hw_lock_flags hw_locks = { 0 };
+ struct dmub_hw_lock_inst_flags inst_flags = { 0 };
+
+ hw_locks.bits.lock_dig = 1;
+ inst_flags.dig_inst = pipe_ctx->stream_res.tg->inst;
+
+ dmub_hw_lock_mgr_cmd(link->ctx->dmub_srv,
+ true,
+ &hw_locks,
+ &inst_flags);
+ } else
+ pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable(
+ pipe_ctx->stream_res.tg);
+ }
+
pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
/* update MSA to requested color space */
pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc,
@@ -4063,9 +4086,24 @@ bool dc_link_dp_set_test_pattern(
CRTC_STATE_VBLANK);
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
CRTC_STATE_VACTIVE);
- if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable)
- pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable(
- pipe_ctx->stream_res.tg);
+
+ if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable) {
+ if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) {
+ union dmub_hw_lock_flags hw_locks = { 0 };
+ struct dmub_hw_lock_inst_flags inst_flags = { 0 };
+
+ hw_locks.bits.lock_dig = 1;
+ inst_flags.dig_inst = pipe_ctx->stream_res.tg->inst;
+
+ dmub_hw_lock_mgr_cmd(link->ctx->dmub_srv,
+ false,
+ &hw_locks,
+ &inst_flags);
+ } else
+ pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable(
+ pipe_ctx->stream_res.tg);
+ }
+
/* Set Test Pattern state */
link->test_pattern_enabled = true;
}
@@ -4255,7 +4293,6 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
void dpcd_set_source_specific_data(struct dc_link *link)
{
- const uint32_t post_oui_delay = 30; // 30ms
uint8_t dspc = 0;
enum dc_status ret;
@@ -4296,10 +4333,6 @@ void dpcd_set_source_specific_data(struct dc_link *link)
link->dc->vendor_signature.data.raw,
sizeof(link->dc->vendor_signature.data.raw));
}
-
- // Sink may need to configure internals based on vendor, so allow some
- // time before proceeding with possibly vendor specific transactions
- msleep(post_oui_delay);
}
bool dc_link_set_backlight_level_nits(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 6590f51caefa..1b3474aa380d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -167,7 +167,8 @@ bool edp_receiver_ready_T9(struct dc_link *link)
} while (++tries < 50);
}
- if (link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off > 0)
+ if (link->local_sink &&
+ link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off > 0)
udelay(link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off * 1000);
return result;
@@ -201,7 +202,8 @@ bool edp_receiver_ready_T7(struct dc_link *link)
} while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms
}
- if (link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
+ if (link->local_sink &&
+ link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000);
return result;
@@ -281,7 +283,7 @@ void dp_set_hw_lane_settings(
{
struct link_encoder *encoder = link->link_enc;
- if (!link->is_lttpr_mode_transparent && !is_immediate_downstream(link, offset))
+ if (link->lttpr_non_transparent_mode && !is_immediate_downstream(link, offset))
return;
/* call Encoder to set lane settings */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 0c5619364e7d..1000dc6daf72 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -52,6 +52,9 @@
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#include "../dcn30/dcn30_resource.h"
+#endif
#define DC_LOGGER_INIT(logger)
@@ -107,6 +110,10 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
case FAMILY_NV:
dc_version = DCN_VERSION_2_0;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ if (ASICREV_IS_SIENNA_CICHLID_P(asic_id.hw_internal_rev))
+ dc_version = DCN_VERSION_3_0;
+#endif
break;
default:
dc_version = DCE_VERSION_UNKNOWN;
@@ -168,6 +175,11 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
res_pool = dcn21_create_resource_pool(init_data, dc);
break;
#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case DCN_VERSION_3_0:
+ res_pool = dcn30_create_resource_pool(init_data, dc);
+ break;
+#endif
default:
break;
@@ -282,6 +294,16 @@ bool resource_construct(
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ for (i = 0; i < caps->num_mpc_3dlut; i++) {
+ pool->mpc_lut[i] = dc_create_3dlut_func();
+ if (pool->mpc_lut[i] == NULL)
+ DC_ERR("DC: failed to create MPC 3dlut!\n");
+ pool->mpc_shaper[i] = dc_create_transfer_func();
+ if (pool->mpc_shaper[i] == NULL)
+ DC_ERR("DC: failed to create MPC shaper!\n");
+ }
+#endif
dc->caps.dynamic_audio = false;
if (pool->audio_count < pool->stream_enc_count) {
dc->caps.dynamic_audio = true;
@@ -377,6 +399,10 @@ bool resource_are_streams_timing_synchronizable(
!= stream2->timing.v_addressable)
return false;
+ if (stream1->timing.v_front_porch
+ != stream2->timing.v_front_porch)
+ return false;
+
if (stream1->timing.pix_clk_100hz
!= stream2->timing.pix_clk_100hz)
return false;
@@ -2049,8 +2075,16 @@ enum dc_status resource_map_pool_resources(
}
/* Add ABM to the resource if on EDP */
- if (pipe_ctx->stream && dc_is_embedded_signal(pipe_ctx->stream->signal))
+ if (pipe_ctx->stream && dc_is_embedded_signal(pipe_ctx->stream->signal)) {
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ if (pool->abm)
+ pipe_ctx->stream_res.abm = pool->abm;
+ else
+ pipe_ctx->stream_res.abm = pool->multiple_abms[pipe_ctx->stream_res.tg->inst];
+#else
pipe_ctx->stream_res.abm = pool->abm;
+#endif
+ }
for (i = 0; i < context->stream_count; i++)
if (context->streams[i] == stream) {
@@ -2867,6 +2901,10 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format)
case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
+#endif
return 32;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 4f0e7203dba4..3b897372ed27 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -243,6 +243,9 @@ bool dc_stream_set_cursor_attributes(
struct dc *dc;
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ bool reset_idle_optimizations = false;
+#endif
if (NULL == stream) {
dm_error("DC: dc_stream is NULL!\n");
@@ -262,6 +265,15 @@ bool dc_stream_set_cursor_attributes(
res_ctx = &dc->current_state->res_ctx;
stream->cursor_attributes = *attributes;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ /* disable idle optimizations while updating cursor */
+ if (dc->idle_optimizations_allowed) {
+ dc->hwss.apply_idle_power_optimizations(dc, false);
+ reset_idle_optimizations = true;
+ }
+
+#endif
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
@@ -281,6 +293,12 @@ bool dc_stream_set_cursor_attributes(
if (pipe_to_program)
dc->hwss.cursor_lock(dc, pipe_to_program, false);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ /* re-enable idle optimizations if necessary */
+ if (reset_idle_optimizations)
+ dc->hwss.apply_idle_power_optimizations(dc, true);
+
+#endif
return true;
}
@@ -292,6 +310,9 @@ bool dc_stream_set_cursor_position(
struct dc *dc;
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ bool reset_idle_optimizations = false;
+#endif
if (NULL == stream) {
dm_error("DC: dc_stream is NULL!\n");
@@ -305,6 +326,16 @@ bool dc_stream_set_cursor_position(
dc = stream->ctx->dc;
res_ctx = &dc->current_state->res_ctx;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+
+ /* disable idle optimizations if enabling cursor */
+ if (dc->idle_optimizations_allowed &&
+ !stream->cursor_position.enable && position->enable) {
+ dc->hwss.apply_idle_power_optimizations(dc, false);
+ reset_idle_optimizations = true;
+ }
+
+#endif
stream->cursor_position = *position;
for (i = 0; i < MAX_PIPES; i++) {
@@ -328,6 +359,12 @@ bool dc_stream_set_cursor_position(
if (pipe_to_program)
dc->hwss.cursor_lock(dc, pipe_to_program, false);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ /* re-enable idle optimizations if necessary */
+ if (reset_idle_optimizations)
+ dc->hwss.apply_idle_power_optimizations(dc, true);
+
+#endif
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
index 64cf24a9ab08..f2b39ec35c89 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
@@ -47,9 +47,6 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
*/
memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config));
dc->vm_pa_config.valid = true;
-
- if (pa_config->is_hvm_enabled == 0)
- dc->debug.nv12_iflip_vm_wa = false;
}
return num_vmids;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 85908561c741..f7cb1354a635 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -42,7 +42,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.84"
+#define DC_VER "3.2.91"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -186,6 +186,15 @@ struct dc_dcc_setting {
unsigned int max_compressed_blk_size;
unsigned int max_uncompressed_blk_size;
bool independent_64b_blks;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ // These bitfields are to be used starting with DCN 3.0
+ struct {
+ uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN 3.0 (the worst compression case)
+ uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN 3.0
+ uint32_t dcc_256_128_128 : 1; //available starting with DCN 3.0
+ uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN 3.0 (the best compression case)
+ } dcc_controls;
+#endif
};
struct dc_surface_dcc_cap {
@@ -274,10 +283,13 @@ struct dc_config {
bool edp_not_connected;
bool force_enum_edp;
bool forced_clocks;
- bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well
+ bool allow_lttpr_non_transparent_mode;
bool multi_mon_pp_mclk_switch;
bool disable_dmcu;
bool enable_4to1MPC;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ bool clamp_min_dcfclk;
+#endif
};
enum visual_confirm {
@@ -325,6 +337,7 @@ enum dcn_pwr_state {
struct dc_clocks {
int dispclk_khz;
int dppclk_khz;
+ int disp_dpp_voltage_level_khz;
int dcfclk_khz;
int socclk_khz;
int dcfclk_deep_sleep_khz;
@@ -455,10 +468,16 @@ struct dc_debug_options {
bool skip_detection_link_training;
bool remove_disconnect_edp;
unsigned int force_odm_combine; //bit vector based on otg inst
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ unsigned int force_odm_combine_4to1; //bit vector based on otg inst
+#endif
unsigned int force_fclk_khz;
bool disable_tri_buf;
bool dmub_offload_enabled;
bool dmcub_emulation;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ bool disable_idle_power_optimizations;
+#endif
bool dmub_command_table; /* for testing only */
struct dc_bw_validation_profile bw_val_profile;
bool disable_fec;
@@ -467,11 +486,13 @@ struct dc_debug_options {
* watermarks are not affected.
*/
unsigned int force_min_dcfclk_mhz;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ int dwb_fi_phase;
+#endif
bool disable_timing_sync;
bool cm_in_bypass;
int force_clock_mode;/*every mode change.*/
- bool nv12_iflip_vm_wa;
bool disable_dram_clock_change_vactive_support;
bool validate_dml_output;
bool enable_dmcub_surface_flip;
@@ -573,6 +594,9 @@ struct dc {
/* Require to optimize clocks and bandwidth for added/removed planes */
bool optimized_required;
bool wm_optimized_required;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ bool idle_optimizations_allowed;
+#endif
/* Require to maintain clocks and bandwidth for UEFI enabled HW */
int optimize_seamless_boot_streams;
@@ -629,6 +653,9 @@ struct dc_init_data {
*/
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
struct dpcd_vendor_signature vendor_signature;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ bool force_smu_not_present;
+#endif
};
struct dc_callback_init {
@@ -822,6 +849,9 @@ struct dc_plane_state {
struct dc_transfer_func *in_shaper_func;
struct dc_transfer_func *blend_tf;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ struct dc_transfer_func *gamcor_tf;
+#endif
enum surface_pixel_format format;
enum dc_rotation_angle rotation;
enum plane_stereo_format stereo_format;
@@ -967,6 +997,14 @@ void dc_resource_state_construct(
const struct dc *dc,
struct dc_state *dst_ctx);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+bool dc_acquire_release_mpc_3dlut(
+ struct dc *dc, bool acquire,
+ struct dc_stream_state *stream,
+ struct dc_3dlut **lut,
+ struct dc_transfer_func **shaper);
+#endif
+
void dc_resource_state_copy_construct(
const struct dc_state *src_ctx,
struct dc_state *dst_ctx);
@@ -1086,6 +1124,10 @@ struct hdcp_caps {
#include "dc_link.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane);
+
+#endif
/*******************************************************************************
* Sink Interfaces - A sink corresponds to a display output device
******************************************************************************/
@@ -1199,6 +1241,23 @@ bool dc_is_dmcu_initialized(struct dc *dc);
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+
+void dc_allow_idle_optimizations(struct dc *dc, bool allow);
+
+/*
+ * blank all streams, and set min and max memory clock to
+ * lowest and highest DPM level, respectively
+ */
+void dc_unlock_memory_clock_frequency(struct dc *dc);
+
+/*
+ * set min memory clock to the min required for current mode,
+ * max to maxDPM, and unblank streams
+ */
+void dc_lock_memory_clock_frequency(struct dc *dc);
+
+#endif
/*******************************************************************************
* DSC Interfaces
******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index b1dd0d60d98e..845a3054f21f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -89,7 +89,6 @@ struct dc_vbios_funcs {
bool (*is_device_id_supported)(
struct dc_bios *bios,
struct device_id id);
-
/* COMMANDS */
enum bp_result (*encoder_control)(
@@ -131,6 +130,9 @@ struct dc_vbios_funcs {
enum bp_result (*get_board_layout_info)(
struct dc_bios *dcb,
struct board_layout_info *board_layout_info);
+ uint16_t (*pack_data_tables)(
+ struct dc_bios *dcb,
+ void *dst);
};
struct bios_registers {
@@ -151,6 +153,7 @@ struct dc_bios {
struct integrated_info *integrated_info;
struct dc_firmware_info fw_info;
bool fw_info_valid;
+ struct dc_vram_info vram_info;
};
#endif /* DC_BIOS_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index eea2429ac67d..96532f7ba480 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -106,29 +106,17 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
}
-void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv)
+bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
{
- struct dmub_srv *dmub = dc_dmub_srv->dmub;
- struct dc_context *dc_ctx = dc_dmub_srv->ctx;
- enum dmub_status status;
+ struct dmub_srv *dmub;
+ union dmub_fw_boot_status status;
- for (;;) {
- /* Wait up to a second for PHY init. */
- status = dmub_srv_wait_for_phy_init(dmub, 1000000);
- if (status == DMUB_STATUS_OK)
- /* Initialization OK */
- break;
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return false;
- DC_ERROR("DMCUB PHY init failed: status=%d\n", status);
- ASSERT(0);
+ dmub = dc_dmub_srv->dmub;
- if (status != DMUB_STATUS_TIMEOUT)
- /*
- * Server likely initialized or we don't have
- * DMCUB HW support - this won't end.
- */
- break;
+ status = dmub->hw_funcs.get_fw_status(dmub);
- /* Continue spinning so we don't hang the ASIC. */
- }
+ return status.bits.optimized_init_done;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index a3a09ccb6d26..8bd20d0d7689 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -56,4 +56,6 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv);
+bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv);
+
#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index a8dc3082e3e1..b7a8c71e3e39 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -62,6 +62,9 @@ enum dc_plane_addr_type {
PLN_ADDR_TYPE_GRAPHICS = 0,
PLN_ADDR_TYPE_GRPH_STEREO,
PLN_ADDR_TYPE_VIDEO_PROGRESSIVE,
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ PLN_ADDR_TYPE_RGBEA
+#endif
};
struct dc_plane_address {
@@ -84,6 +87,16 @@ struct dc_plane_address {
PHYSICAL_ADDRESS_LOC right_meta_addr;
union large_integer right_dcc_const_color;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ PHYSICAL_ADDRESS_LOC left_alpha_addr;
+ PHYSICAL_ADDRESS_LOC left_alpha_meta_addr;
+ union large_integer left_alpha_dcc_const_color;
+
+ PHYSICAL_ADDRESS_LOC right_alpha_addr;
+ PHYSICAL_ADDRESS_LOC right_alpha_meta_addr;
+ union large_integer right_alpha_dcc_const_color;
+#endif
+
} grph_stereo;
/*video progressive*/
@@ -96,6 +109,18 @@ struct dc_plane_address {
PHYSICAL_ADDRESS_LOC chroma_meta_addr;
union large_integer chroma_dcc_const_color;
} video_progressive;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ struct {
+ PHYSICAL_ADDRESS_LOC addr;
+ PHYSICAL_ADDRESS_LOC meta_addr;
+ union large_integer dcc_const_color;
+
+ PHYSICAL_ADDRESS_LOC alpha_addr;
+ PHYSICAL_ADDRESS_LOC alpha_meta_addr;
+ union large_integer alpha_dcc_const_color;
+ } rgbea;
+#endif
};
union large_integer page_table_base;
@@ -131,9 +156,15 @@ struct dc_plane_dcc_param {
int meta_pitch;
bool independent_64b_blks;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ uint8_t dcc_ind_blk;
+#endif
int meta_pitch_c;
bool independent_64b_blks_c;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ uint8_t dcc_ind_blk_c;
+#endif
};
/*Displayable pixel format in fb*/
@@ -169,6 +200,10 @@ enum surface_pixel_format {
SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX,
SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT,
SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT,
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ SURFACE_PIXEL_FORMAT_GRPH_RGBE,
+ SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA,
+#endif
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
@@ -355,6 +390,7 @@ union dc_tiling_info {
bool meta_linear;
bool rb_aligned;
bool pipe_aligned;
+ unsigned int num_pkrs;
} gfx9;
};
@@ -813,6 +849,42 @@ enum dwb_stereo_type {
DWB_STEREO_TYPE_FRAME_SEQUENTIAL = 3, /* Frame sequential */
};
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+
+enum dwb_out_format {
+ DWB_OUT_FORMAT_32BPP_ARGB = 0,
+ DWB_OUT_FORMAT_32BPP_RGBA = 1,
+ DWB_OUT_FORMAT_64BPP_ARGB = 2,
+ DWB_OUT_FORMAT_64BPP_RGBA = 3
+};
+
+enum dwb_out_denorm {
+ DWB_OUT_DENORM_10BPC = 0,
+ DWB_OUT_DENORM_8BPC = 1,
+ DWB_OUT_DENORM_BYPASS = 2
+};
+
+enum cm_gamut_remap_select {
+ CM_GAMUT_REMAP_MODE_BYPASS = 0,
+ CM_GAMUT_REMAP_MODE_RAMA_COEFF,
+ CM_GAMUT_REMAP_MODE_RAMB_COEFF,
+ CM_GAMUT_REMAP_MODE_RESERVED
+};
+
+enum cm_gamut_coef_format {
+ CM_GAMUT_REMAP_COEF_FORMAT_S2_13 = 0,
+ CM_GAMUT_REMAP_COEF_FORMAT_S3_12 = 1
+};
+
+struct mcif_warmup_params {
+ union large_integer start_address;
+ unsigned int address_increment;
+ unsigned int region_size;
+ unsigned int p_vmid;
+};
+
+#endif
+
#define MCIF_BUF_COUNT 4
struct mcif_buf_params {
@@ -822,6 +894,9 @@ struct mcif_buf_params {
unsigned int chroma_pitch;
unsigned int warmup_pitch;
unsigned int swlock;
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ unsigned int p_vmid;
+#endif
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index f63fc25aa6c5..e002ef706e1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -100,7 +100,7 @@ struct dc_link {
bool link_state_valid;
bool aux_access_disabled;
bool sync_lt_in_progress;
- bool is_lttpr_mode_transparent;
+ bool lttpr_non_transparent_mode;
/* caps is the same as reported_link_cap. link_traing use
* reported_link_cap. Will clean up. TODO
@@ -311,8 +311,8 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
*/
#ifdef CONFIG_DRM_AMD_DC_HDCP
-bool dc_link_is_hdcp14(struct dc_link *link);
-bool dc_link_is_hdcp22(struct dc_link *link);
+bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal);
+bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal);
#endif
void dc_link_set_drive_settings(struct dc *dc,
struct link_training_settings *lt_settings,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 49aad691e687..f2ed9bc5a319 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -87,6 +87,13 @@ struct dc_writeback_info {
int dwb_pipe_inst;
struct dc_dwb_params dwb_params;
struct mcif_buf_params mcif_buf_params;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ struct mcif_warmup_params mcif_warmup_params;
+ /* the plane that is the input to the TOP_MUX of the MPCC that is the DWB source */
+ struct dc_plane_state *writeback_source_plane;
+ /* source MPCC instance, for use internally by dc */
+ int mpcc_inst;
+#endif
};
struct dc_writeback_update {
@@ -200,6 +207,10 @@ struct dc_stream_state {
/* writeback */
unsigned int num_wb_info;
struct dc_writeback_info writeback_info[MAX_DWB_PIPES];
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ const struct dc_transfer_func *func_shaper;
+ const struct dc_3dlut *lut3d_func;
+#endif
/* Computed state bits */
bool mode_changed : 1;
@@ -251,6 +262,10 @@ struct dc_stream_update {
struct dc_writeback_update *wb_update;
struct dc_dsc_config *dsc_config;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ struct dc_transfer_func *func_shaper;
+ struct dc_3dlut *lut3d_func;
+#endif
};
bool dc_is_stream_unchanged(
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index f236da1c1859..d64241433548 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -255,6 +255,8 @@ struct dc_edid_caps {
uint8_t qs_bit;
uint8_t qy_bit;
+ uint32_t max_tmds_clk_mhz;
+
/*HDMI 2.0 caps*/
bool lte_340mcsc_scramble;
@@ -475,6 +477,19 @@ enum display_content_type {
DISPLAY_CONTENT_TYPE_GAME = 8
};
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+enum cm_gamut_adjust_type {
+ CM_GAMUT_ADJUST_TYPE_BYPASS = 0,
+ CM_GAMUT_ADJUST_TYPE_HW, /* without adjustments */
+ CM_GAMUT_ADJUST_TYPE_SW /* use adjustments */
+};
+
+struct cm_grph_csc_adjustment {
+ struct fixed31_32 temperature_matrix[12];
+ enum cm_gamut_adjust_type gamut_adjust_type;
+ enum cm_gamut_coef_format gamut_coef_format;
+};
+#endif
/* writeback */
struct dwb_stereo_params {
bool stereo_enabled; /* false: normal mode, true: 3D stereo */
@@ -492,9 +507,21 @@ struct dc_dwb_cnv_params {
unsigned int crop_x; /* cropped window start x value at cnv output */
unsigned int crop_y; /* cropped window start y value at cnv output */
enum dwb_cnv_out_bpc cnv_out_bpc; /* cnv output pixel depth - 8bpc or 10bpc */
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ enum dwb_out_format fc_out_format; /* dwb output pixel format - 2101010 or 16161616 and ARGB or RGBA */
+ enum dwb_out_denorm out_denorm_mode;/* dwb output denormalization mode */
+ unsigned int out_max_pix_val;/* pixel values greater than out_max_pix_val are clamped to out_max_pix_val */
+ unsigned int out_min_pix_val;/* pixel values less than out_min_pix_val are clamped to out_min_pix_val */
+#endif
};
struct dc_dwb_params {
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ unsigned int dwbscl_black_color; /* must be in FP1.5.10 */
+ unsigned int hdr_mult; /* must be in FP1.6.12 */
+ struct cm_grph_csc_adjustment csc_params;
+ struct dwb_stereo_params stereo_params;
+#endif
struct dc_dwb_cnv_params cnv_params; /* CNV source size and cropping window parameters */
unsigned int dest_width; /* Destination width */
unsigned int dest_height; /* Destination height */
@@ -862,6 +889,15 @@ struct dsc_dec_dpcd_caps {
uint32_t branch_max_line_width;
};
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+enum dc_gpu_mem_alloc_type {
+ DC_MEM_ALLOC_TYPE_GART,
+ DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ DC_MEM_ALLOC_TYPE_INVISIBLE_FRAME_BUFFER,
+ DC_MEM_ALLOC_TYPE_AGP
+};
+
+#endif
enum dc_psr_version {
DC_PSR_VERSION_1 = 0,
DC_PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index f704a8fd52e8..973be8f9fd10 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -29,7 +29,8 @@
DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
-dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o dmub_abm.o dce_panel_cntl.o
+dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o dmub_abm.o dce_panel_cntl.o \
+dmub_hw_lock_mgr.o
AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
index 9718a4823372..a44effcda49f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
@@ -76,6 +76,22 @@
SR(DC_ABM1_HGLS_REG_READ_PROGRESS), \
NBIO_SR(BIOS_SCRATCH_2)
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define ABM_DCN301_REG_LIST(id)\
+ ABM_COMMON_REG_LIST_DCE_BASE(), \
+ SRI(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \
+ SRI(DC_ABM1_LS_SAMPLE_RATE, ABM, id), \
+ SRI(BL1_PWM_BL_UPDATE_SAMPLE_RATE, ABM, id), \
+ SRI(DC_ABM1_HG_MISC_CTRL, ABM, id), \
+ SRI(DC_ABM1_IPCSC_COEFF_SEL, ABM, id), \
+ SRI(BL1_PWM_CURRENT_ABM_LEVEL, ABM, id), \
+ SRI(BL1_PWM_TARGET_ABM_LEVEL, ABM, id), \
+ SRI(BL1_PWM_USER_LEVEL, ABM, id), \
+ SRI(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \
+ SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \
+ NBIO_SR(BIOS_SCRATCH_2)
+#endif
+
#define ABM_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -149,6 +165,10 @@
#define ABM_MASK_SH_LIST_DCN20(mask_sh) ABM_MASK_SH_LIST_DCE110(mask_sh)
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define ABM_MASK_SH_LIST_DCN301(mask_sh) ABM_MASK_SH_LIST_DCN10(mask_sh)
+#endif
+
#define ABM_REG_FIELD_LIST(type) \
type ABM1_HG_NUM_OF_BINS_SEL; \
type ABM1_HG_VMAX_SEL; \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 5a35495bc11d..408046579712 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -140,6 +140,8 @@ static void check_audio_bandwidth_hdmi(
bool limit_freq_to_88_2_khz = false;
bool limit_freq_to_96_khz = false;
bool limit_freq_to_174_4_khz = false;
+ if (!crtc_info)
+ return;
/* For two channels supported return whatever sink support,unmodified*/
if (channel_count > 2) {
@@ -784,7 +786,7 @@ void dce_aud_wall_dto_setup(
struct azalia_clock_info clock_info = { 0 };
- if (dc_is_hdmi_signal(signal)) {
+ if (dc_is_hdmi_tmds_signal(signal)) {
uint32_t src_sel;
/*DTO0 Programming goal:
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index d2ad0504b0de..9cc65dc1970f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -1004,16 +1004,58 @@ static bool get_pixel_clk_frequency_100hz(
return false;
}
-
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
/* this table is used to find *1.001 and /1.001 pixel rates from a non-precise pixel rate */
-struct pixel_rate_range_table_entry {
- unsigned int range_min_khz;
- unsigned int range_max_khz;
- unsigned int target_pixel_rate_khz;
- unsigned short mult_factor;
- unsigned short div_factor;
+const struct pixel_rate_range_table_entry video_optimized_pixel_rates[] = {
+ // /1.001 rates
+ {25170, 25180, 25200, 1000, 1001}, //25.2MHz -> 25.17
+ {59340, 59350, 59400, 1000, 1001}, //59.4Mhz -> 59.340
+ {74170, 74180, 74250, 1000, 1001}, //74.25Mhz -> 74.1758
+ {125870, 125880, 126000, 1000, 1001}, //126Mhz -> 125.87
+ {148350, 148360, 148500, 1000, 1001}, //148.5Mhz -> 148.3516
+ {167830, 167840, 168000, 1000, 1001}, //168Mhz -> 167.83
+ {222520, 222530, 222750, 1000, 1001}, //222.75Mhz -> 222.527
+ {257140, 257150, 257400, 1000, 1001}, //257.4Mhz -> 257.1429
+ {296700, 296710, 297000, 1000, 1001}, //297Mhz -> 296.7033
+ {342850, 342860, 343200, 1000, 1001}, //343.2Mhz -> 342.857
+ {395600, 395610, 396000, 1000, 1001}, //396Mhz -> 395.6
+ {409090, 409100, 409500, 1000, 1001}, //409.5Mhz -> 409.091
+ {445050, 445060, 445500, 1000, 1001}, //445.5Mhz -> 445.055
+ {467530, 467540, 468000, 1000, 1001}, //468Mhz -> 467.5325
+ {519230, 519240, 519750, 1000, 1001}, //519.75Mhz -> 519.231
+ {525970, 525980, 526500, 1000, 1001}, //526.5Mhz -> 525.974
+ {545450, 545460, 546000, 1000, 1001}, //546Mhz -> 545.455
+ {593400, 593410, 594000, 1000, 1001}, //594Mhz -> 593.4066
+ {623370, 623380, 624000, 1000, 1001}, //624Mhz -> 623.377
+ {692300, 692310, 693000, 1000, 1001}, //693Mhz -> 692.308
+ {701290, 701300, 702000, 1000, 1001}, //702Mhz -> 701.2987
+ {791200, 791210, 792000, 1000, 1001}, //792Mhz -> 791.209
+ {890100, 890110, 891000, 1000, 1001}, //891Mhz -> 890.1099
+ {1186810, 1186820, 1188000, 1000, 1001},//1188Mhz -> 1186.8131
+
+ // *1.001 rates
+ {27020, 27030, 27000, 1001, 1000}, //27Mhz
+ {54050, 54060, 54000, 1001, 1000}, //54Mhz
+ {108100, 108110, 108000, 1001, 1000},//108Mhz
};
+const struct pixel_rate_range_table_entry *look_up_in_video_optimized_rate_tlb(
+ unsigned int pixel_rate_khz)
+{
+ int i;
+
+ for (i = 0; i < NUM_ELEMENTS(video_optimized_pixel_rates); i++) {
+ const struct pixel_rate_range_table_entry *e = &video_optimized_pixel_rates[i];
+
+ if (e->range_min_khz <= pixel_rate_khz && pixel_rate_khz <= e->range_max_khz) {
+ return e;
+ }
+ }
+
+ return NULL;
+}
+#endif
+
static bool dcn20_program_pix_clk(
struct clock_source *clock_source,
struct pixel_clk_params *pix_clk_params,
@@ -1031,6 +1073,85 @@ static const struct clock_source_funcs dcn20_clk_src_funcs = {
.get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz
};
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+static bool dcn3_program_pix_clk(
+ struct clock_source *clock_source,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
+ unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
+ unsigned int dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz;
+ const struct pixel_rate_range_table_entry *e =
+ look_up_in_video_optimized_rate_tlb(pix_clk_params->requested_pix_clk_100hz / 10);
+
+ // For these signal types the driver programs DP_DTO without calling the VBIOS command table
+ if (dc_is_dp_signal(pix_clk_params->signal_type)) {
+ if (e) {
+ /* Set DTO values: phase = target clock, modulo = reference clock*/
+ REG_WRITE(PHASE[inst], e->target_pixel_rate_khz * e->mult_factor);
+ REG_WRITE(MODULO[inst], dp_dto_ref_khz * e->div_factor);
+ } else {
+ /* Set DTO values: phase = target clock, modulo = reference clock*/
+ REG_WRITE(PHASE[inst], pll_settings->actual_pix_clk_100hz * 100);
+ REG_WRITE(MODULO[inst], dp_dto_ref_khz * 1000);
+ }
+ REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1);
+ } else
+ // For other signal types (HDMI_TYPE_A, DVI) the driver still calls the VBIOS command table
+ dce112_program_pix_clk(clock_source, pix_clk_params, pll_settings);
+
+ return true;
+}
+
+static uint32_t dcn3_get_pix_clk_dividers(
+ struct clock_source *cs,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ unsigned long long actual_pix_clk_100Hz = pix_clk_params->requested_pix_clk_100hz;
+ struct dce110_clk_src *clk_src;
+
+ clk_src = TO_DCE110_CLK_SRC(cs);
+ DC_LOGGER_INIT();
+
+ if (pix_clk_params == NULL || pll_settings == NULL
+ || pix_clk_params->requested_pix_clk_100hz == 0) {
+ DC_LOG_ERROR(
+ "%s: Invalid parameters!!\n", __func__);
+ return -1;
+ }
+
+ memset(pll_settings, 0, sizeof(*pll_settings));
+ /* Adjust for HDMI Type A deep color */
+ if (pix_clk_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A) {
+ switch (pix_clk_params->color_depth) {
+ case COLOR_DEPTH_101010:
+ actual_pix_clk_100Hz = (actual_pix_clk_100Hz * 5) >> 2;
+ break;
+ case COLOR_DEPTH_121212:
+ actual_pix_clk_100Hz = (actual_pix_clk_100Hz * 6) >> 2;
+ break;
+ case COLOR_DEPTH_161616:
+ actual_pix_clk_100Hz = actual_pix_clk_100Hz * 2;
+ break;
+ default:
+ break;
+ }
+ }
+ pll_settings->actual_pix_clk_100hz = (unsigned int) actual_pix_clk_100Hz;
+ pll_settings->adjusted_pix_clk_100hz = (unsigned int) actual_pix_clk_100Hz;
+ pll_settings->calculated_pix_clk_100hz = (unsigned int) actual_pix_clk_100Hz;
+
+ return 0;
+}
+
+static const struct clock_source_funcs dcn3_clk_src_funcs = {
+ .cs_power_down = dce110_clock_source_power_down,
+ .program_pix_clk = dcn3_program_pix_clk,
+ .get_pix_clk_dividers = dcn3_get_pix_clk_dividers
+};
+#endif
/*****************************************/
/* Constructor */
/*****************************************/
@@ -1415,3 +1536,21 @@ bool dcn20_clk_src_construct(
return ret;
}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+bool dcn3_clk_src_construct(
+ struct dce110_clk_src *clk_src,
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ const struct dce110_clk_src_shift *cs_shift,
+ const struct dce110_clk_src_mask *cs_mask)
+{
+ bool ret = dce112_clk_src_construct(clk_src, ctx, bios, id, regs, cs_shift, cs_mask);
+
+ clk_src->base.funcs = &dcn3_clk_src_funcs;
+
+ return ret;
+}
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
index 51bd25079606..69b904ab8151 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
@@ -91,6 +91,23 @@
SRII(PIXEL_RATE_CNTL, OTG, 2),\
SRII(PIXEL_RATE_CNTL, OTG, 3)
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define CS_COMMON_REG_LIST_DCN3_0(index, pllid) \
+ SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
+ SRII(PHASE, DP_DTO, 0),\
+ SRII(PHASE, DP_DTO, 1),\
+ SRII(PHASE, DP_DTO, 2),\
+ SRII(PHASE, DP_DTO, 3),\
+ SRII(MODULO, DP_DTO, 0),\
+ SRII(MODULO, DP_DTO, 1),\
+ SRII(MODULO, DP_DTO, 2),\
+ SRII(MODULO, DP_DTO, 3),\
+ SRII(PIXEL_RATE_CNTL, OTG, 0),\
+ SRII(PIXEL_RATE_CNTL, OTG, 1),\
+ SRII(PIXEL_RATE_CNTL, OTG, 2),\
+ SRII(PIXEL_RATE_CNTL, OTG, 3)
+#endif
+
#define CS_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\
CS_SF(DP_DTO0_PHASE, DP_DTO0_PHASE, mask_sh),\
CS_SF(DP_DTO0_MODULO, DP_DTO0_MODULO, mask_sh),\
@@ -204,4 +221,29 @@ bool dcn20_clk_src_construct(
const struct dce110_clk_src_shift *cs_shift,
const struct dce110_clk_src_mask *cs_mask);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+bool dcn3_clk_src_construct(
+ struct dce110_clk_src *clk_src,
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ const struct dce110_clk_src_shift *cs_shift,
+ const struct dce110_clk_src_mask *cs_mask);
+#endif
+
+/* this table is used to find *1.001 and /1.001 pixel rates from a non-precise pixel rate */
+struct pixel_rate_range_table_entry {
+ unsigned int range_min_khz;
+ unsigned int range_max_khz;
+ unsigned int target_pixel_rate_khz;
+ unsigned short mult_factor;
+ unsigned short div_factor;
+};
+
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+extern const struct pixel_rate_range_table_entry video_optimized_pixel_rates[];
+const struct pixel_rate_range_table_entry *look_up_in_video_optimized_rate_tlb(
+ unsigned int pixel_rate_khz);
+#endif
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 5479d959ec62..66b88d6ba398 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -78,6 +78,24 @@
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4), \
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 5)
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define HWSEQ_PIXEL_RATE_REG_LIST_3(blk) \
+ SRII(PIXEL_RATE_CNTL, blk, 0), \
+ SRII(PIXEL_RATE_CNTL, blk, 1),\
+ SRII(PIXEL_RATE_CNTL, blk, 2),\
+ SRII(PIXEL_RATE_CNTL, blk, 3), \
+ SRII(PIXEL_RATE_CNTL, blk, 4), \
+ SRII(PIXEL_RATE_CNTL, blk, 5)
+
+#define HWSEQ_PHYPLL_REG_LIST_3(blk) \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1),\
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 2),\
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 3), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 5)
+#endif
+
#define HWSEQ_DCE11_REG_LIST_BASE() \
SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
SR(DCFEV_CLOCK_CONTROL), \
@@ -200,6 +218,28 @@
SR(VGA_TEST_CONTROL), \
SR(DC_IP_REQUEST_CNTL)
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define HWSEQ_DCN30_REG_LIST()\
+ HWSEQ_DCN2_REG_LIST(),\
+ HWSEQ_DCN_REG_LIST(), \
+ HWSEQ_PIXEL_RATE_REG_LIST_3(OTG), \
+ HWSEQ_PHYPLL_REG_LIST_3(OTG), \
+ SR(MICROSECOND_TIME_BASE_DIV), \
+ SR(MILLISECOND_TIME_BASE_DIV), \
+ SR(DISPCLK_FREQ_CHANGE_CNTL), \
+ SR(RBBMIF_TIMEOUT_DIS), \
+ SR(RBBMIF_TIMEOUT_DIS_2), \
+ SR(DCHUBBUB_CRC_CTRL), \
+ SR(DPP_TOP0_DPP_CRC_CTRL), \
+ SR(DPP_TOP0_DPP_CRC_VAL_B_A), \
+ SR(DPP_TOP0_DPP_CRC_VAL_R_G), \
+ SR(MPC_CRC_CTRL), \
+ SR(MPC_CRC_RESULT_GB), \
+ SR(MPC_CRC_RESULT_C), \
+ SR(MPC_CRC_RESULT_AR), \
+ SR(AZALIA_AUDIO_DTO), \
+ SR(AZALIA_CONTROLLER_CLOCK_GATING)
+#endif
#define HWSEQ_DCN2_REG_LIST()\
HWSEQ_DCN_REG_LIST(), \
HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 0), \
@@ -546,6 +586,12 @@ struct dce_hwseq_registers {
HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh)
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define HWSEQ_DCN30_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCN2_MASK_SH_LIST(mask_sh), \
+ HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh)
+#endif
+
#define HWSEQ_DCN2_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
index ebff9b1e312e..43781e77be43 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
@@ -95,7 +95,7 @@ static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_panel_cntl *d
return (uint32_t)(current_backlight);
}
-uint32_t dce_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
+static uint32_t dce_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
{
struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
uint32_t value;
@@ -155,7 +155,7 @@ uint32_t dce_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
return current_backlight;
}
-bool dce_is_panel_backlight_on(struct panel_cntl *panel_cntl)
+static bool dce_is_panel_backlight_on(struct panel_cntl *panel_cntl)
{
struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
uint32_t value;
@@ -165,7 +165,7 @@ bool dce_is_panel_backlight_on(struct panel_cntl *panel_cntl)
return value;
}
-bool dce_is_panel_powered_on(struct panel_cntl *panel_cntl)
+static bool dce_is_panel_powered_on(struct panel_cntl *panel_cntl)
{
struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
@@ -177,7 +177,7 @@ bool dce_is_panel_powered_on(struct panel_cntl *panel_cntl)
return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1);
}
-void dce_store_backlight_level(struct panel_cntl *panel_cntl)
+static void dce_store_backlight_level(struct panel_cntl *panel_cntl)
{
struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
@@ -192,7 +192,7 @@ void dce_store_backlight_level(struct panel_cntl *panel_cntl)
&panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
}
-void dce_driver_set_backlight(struct panel_cntl *panel_cntl,
+static void dce_driver_set_backlight(struct panel_cntl *panel_cntl,
uint32_t backlight_pwm_u16_16)
{
uint32_t backlight_16bit;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
index da0b29abfbda..0cf130dc4e52 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -50,71 +50,7 @@
#define DISABLE_ABM_IMMEDIATELY 255
-static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t panel_inst)
-{
- union dmub_rb_cmd cmd;
- struct dc_context *dc = abm->ctx;
- uint32_t ramping_boundary = 0xFFFF;
-
- cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
- cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE;
- cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst;
- cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst;
- cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
- cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data);
-
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
-
- return true;
-}
-
-static void dmcub_set_backlight_level(
- struct dce_abm *dce_abm,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp,
- uint32_t otg_inst,
- uint32_t panel_inst)
-{
- union dmub_rb_cmd cmd;
- struct dc_context *dc = dce_abm->base.ctx;
- unsigned int backlight_8_bit = 0;
- uint32_t s2;
-
- if (backlight_pwm_u16_16 & 0x10000)
- // Check for max backlight condition
- backlight_8_bit = 0xFF;
- else
- // Take MSB of fractional part since backlight is not max
- backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF;
-
- dmub_abm_set_pipe(&dce_abm->base, otg_inst, panel_inst);
-
- REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_pwm_u16_16);
-
- if (otg_inst == 0)
- frame_ramp = 0;
-
- cmd.abm_set_backlight.header.type = DMUB_CMD__ABM;
- cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
- cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp;
- cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
-
- // Update requested backlight level
- s2 = REG_READ(BIOS_SCRATCH_2);
-
- s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
- backlight_8_bit &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >>
- ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
- s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
-
- REG_WRITE(BIOS_SCRATCH_2, s2);
-}
static void dmub_abm_enable_fractional_pwm(struct dc_context *dc)
{
@@ -211,31 +147,6 @@ static bool dmub_abm_set_level(struct abm *abm, uint32_t level)
return true;
}
-static bool dmub_abm_immediate_disable(struct abm *abm, uint32_t panel_inst)
-{
- dmub_abm_set_pipe(abm, DISABLE_ABM_IMMEDIATELY, panel_inst);
-
- return true;
-}
-
-static bool dmub_abm_set_backlight_level_pwm(
- struct abm *abm,
- unsigned int backlight_pwm_u16_16,
- unsigned int frame_ramp,
- unsigned int otg_inst,
- uint32_t panel_inst)
-{
- struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
-
- dmcub_set_backlight_level(dce_abm,
- backlight_pwm_u16_16,
- frame_ramp,
- otg_inst,
- panel_inst);
-
- return true;
-}
-
static bool dmub_abm_init_config(struct abm *abm,
const char *src,
unsigned int bytes)
@@ -266,11 +177,8 @@ static bool dmub_abm_init_config(struct abm *abm,
static const struct abm_funcs abm_funcs = {
.abm_init = dmub_abm_init,
.set_abm_level = dmub_abm_set_level,
- .set_pipe = dmub_abm_set_pipe,
- .set_backlight_level_pwm = dmub_abm_set_backlight_level_pwm,
.get_current_backlight = dmub_abm_get_current_backlight,
.get_target_backlight = dmub_abm_get_target_backlight,
- .set_abm_immediate_disable = dmub_abm_immediate_disable,
.init_abm_config = dmub_abm_init_config,
};
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index bed5b023a396..d399270fd17e 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -23,53 +23,35 @@
*
*/
-#ifndef _DMUB_TYPES_H_
-#define _DMUB_TYPES_H_
-
-/* Basic type definitions. */
-#include <asm/byteorder.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <stdarg.h>
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#ifndef dmub_memcpy
-#define dmub_memcpy(dest, source, bytes) memcpy((dest), (source), (bytes))
-#endif
-
-#ifndef dmub_memset
-#define dmub_memset(dest, val, bytes) memset((dest), (val), (bytes))
-#endif
-
-#ifndef dmub_udelay
-#define dmub_udelay(microseconds) udelay(microseconds)
-#endif
-
-/* Maximum number of streams on any ASIC. */
-#define DMUB_MAX_STREAMS 6
-
-/* Maximum number of planes on any ASIC. */
-#define DMUB_MAX_PLANES 6
-
-union dmub_addr {
- struct {
- uint32_t low_part;
- uint32_t high_part;
- } u;
- uint64_t quad_part;
-};
-
-struct dmub_psr_debug_flags {
- uint8_t visual_confirm : 1;
- uint8_t reserved : 7;
-};
-
-#if defined(__cplusplus)
+#include "dmub_hw_lock_mgr.h"
+#include "dc_dmub_srv.h"
+#include "dc_types.h"
+#include "core_types.h"
+
+void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
+ bool lock,
+ union dmub_hw_lock_flags *hw_locks,
+ struct dmub_hw_lock_inst_flags *inst_flags)
+{
+ union dmub_rb_cmd cmd = { 0 };
+
+ cmd.lock_hw.header.type = DMUB_CMD__HW_LOCK;
+ cmd.lock_hw.header.sub_type = 0;
+ cmd.lock_hw.header.payload_bytes = sizeof(struct dmub_cmd_lock_hw_data);
+ cmd.lock_hw.lock_hw_data.client = HW_LOCK_CLIENT_DRIVER;
+ cmd.lock_hw.lock_hw_data.lock = lock;
+ cmd.lock_hw.lock_hw_data.hw_locks.u8All = hw_locks->u8All;
+ memcpy(&cmd.lock_hw.lock_hw_data.inst_flags, inst_flags, sizeof(struct dmub_hw_lock_inst_flags));
+
+ if (!lock)
+ cmd.lock_hw.lock_hw_data.should_release = 1;
+
+ dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dmub_srv);
+ dc_dmub_srv_wait_idle(dmub_srv);
}
-#endif
-#endif /* _DMUB_TYPES_H_ */
+bool should_use_dmub_lock(struct dc_link *link)
+{
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
new file mode 100644
index 000000000000..bc5906347493
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_HW_LOCK_MGR_H_
+#define _DMUB_HW_LOCK_MGR_H_
+
+#include "dc_dmub_srv.h"
+#include "core_types.h"
+
+void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
+ bool lock,
+ union dmub_hw_lock_flags *hw_locks,
+ struct dmub_hw_lock_inst_flags *inst_flags);
+
+bool should_use_dmub_lock(struct dc_link *link);
+
+#endif /*_DMUB_HW_LOCK_MGR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 044a0133ebb1..916d305d3022 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -231,8 +231,9 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations;
copy_settings_data->frame_delay = psr_context->frame_delay;
copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq;
- copy_settings_data->debug.visual_confirm = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR ?
+ copy_settings_data->debug.bitfields.visual_confirm = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR ?
true : false;
+ copy_settings_data->init_sdp_deadline = psr_context->sdpTransmitLineNumDeadline;
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index b77e9dc16086..49380ed3aeae 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1069,8 +1069,17 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
}
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
+
+ /*
+	 * After the output is set to an idle pattern, some sinks need time to
+	 * recognize that the stream has changed, or they enter a protection
+	 * state and hang.
+ */
+ if (!dc_is_embedded_signal(pipe_ctx->stream->signal))
+ msleep(60);
+ }
+
}
@@ -1148,7 +1157,7 @@ static void build_audio_output(
pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz;
/*for HDMI, audio ACR is with deep color ratio factor*/
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal) &&
+ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) &&
audio_output->crtc_info.requested_pixel_clock_100Hz ==
(stream->timing.pix_clk_100hz)) {
if (pipe_ctx->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
@@ -1443,6 +1452,8 @@ static void power_down_encoders(struct dc *dc)
dc->links[i]->link_enc->funcs->disable_output(
dc->links[i]->link_enc, signal);
+
+ dc->links[i]->link_status.link_active = false;
}
}
@@ -1961,10 +1972,8 @@ static void dce110_setup_audio_dto(
if (pipe_ctx->top_pipe)
continue;
-
if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A)
continue;
-
if (pipe_ctx->stream_res.audio != NULL) {
struct audio_output audio_output;
@@ -2767,6 +2776,16 @@ void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
panel_cntl->funcs->store_backlight_level(panel_cntl);
}
+void dce110_set_pipe(struct pipe_ctx *pipe_ctx)
+{
+ struct abm *abm = pipe_ctx->stream_res.abm;
+ struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
+ uint32_t otg_inst = pipe_ctx->stream_res.tg->inst + 1;
+
+ if (abm && panel_cntl)
+ abm->funcs->set_pipe(abm, otg_inst, panel_cntl->inst);
+}
+
static const struct hw_sequencer_funcs dce110_funcs = {
.program_gamut_remap = program_gamut_remap,
.program_output_csc = program_output_csc,
@@ -2804,6 +2823,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.set_cursor_attribute = dce110_set_cursor_attribute,
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
+ .set_pipe = dce110_set_pipe,
};
static const struct hwseq_private_funcs dce110_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index fe5326df00f7..b6f3843d3d05 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -89,6 +89,7 @@ bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx,
uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp);
void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx);
+void dce110_set_pipe(struct pipe_ctx *pipe_ctx);
#endif /* __DC_HWSS_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 319366ebb44f..cedf359a00f5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -326,6 +326,18 @@ void hubp1_program_pixel_format(
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 119);
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 116,
+ ALPHA_PLANE_EN, 0);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 116,
+ ALPHA_PLANE_EN, 1);
+ break;
+#endif
default:
BREAK_TO_DEBUGGER();
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 77f16921e7f0..cb45f05a0319 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -51,6 +51,7 @@
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dsc.h"
+#include "dce/dmub_hw_lock_mgr.h"
#define DC_LOGGER_INIT(logger)
@@ -1287,7 +1288,9 @@ void dcn10_init_hw(struct dc *dc)
if (!dcb->funcs->is_accelerated_mode(dcb))
hws->funcs.disable_vga(dc->hwseq);
- hws->funcs.bios_golden_init(dc);
+ if (!dc_dmub_srv_optimized_init_done(dc->ctx->dmub_srv))
+ hws->funcs.bios_golden_init(dc);
+
if (dc->ctx->dc_bios->fw_info_valid) {
res_pool->ref_clocks.xtalin_clock_inKhz =
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
@@ -1393,10 +1396,10 @@ void dcn10_init_hw(struct dc *dc)
if (edp_link &&
edp_link->link_enc->funcs->is_dig_enabled &&
edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
- dc->hwss.edp_backlight_control &&
+ dc->hwseq->funcs.edp_backlight_control &&
dc->hwss.power_down &&
dc->hwss.edp_power_control) {
- dc->hwss.edp_backlight_control(edp_link, false);
+ dc->hwseq->funcs.edp_backlight_control(edp_link, false);
dc->hwss.power_down(dc);
dc->hwss.edp_power_control(edp_link, false);
} else {
@@ -1453,6 +1456,11 @@ void dcn10_init_hw(struct dc *dc)
if (dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
+#ifdef CONFIG_DRM_AMD_DC_DCN3_0
+ if (dc->clk_mgr->funcs->set_hard_max_memclk)
+ dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
+#endif
+
}
void dcn10_reset_hw_ctx_wrap(
@@ -1758,8 +1766,20 @@ void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
if (lock)
delay_cursor_until_vupdate(dc, pipe);
- dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
- pipe->stream_res.opp->inst, lock);
+ if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
+ union dmub_hw_lock_flags hw_locks = { 0 };
+ struct dmub_hw_lock_inst_flags inst_flags = { 0 };
+
+ hw_locks.bits.lock_cursor = 1;
+ inst_flags.opp_inst = pipe->stream_res.opp->inst;
+
+ dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
+ lock,
+ &hw_locks,
+ &inst_flags);
+ } else
+ dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
+ pipe->stream_res.opp->inst, lock);
}
static bool wait_for_reset_trigger_to_occur(
@@ -2576,14 +2596,15 @@ void dcn10_blank_pixel_data(
if (stream_res->tg->funcs->set_blank)
stream_res->tg->funcs->set_blank(stream_res->tg, blank);
if (stream_res->abm) {
- stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1,
- stream->link->panel_cntl->inst);
+ dc->hwss.set_pipe(pipe_ctx);
stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
}
} else if (blank) {
dc->hwss.set_abm_immediate_disable(pipe_ctx);
- if (stream_res->tg->funcs->set_blank)
+ if (stream_res->tg->funcs->set_blank) {
+ stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
stream_res->tg->funcs->set_blank(stream_res->tg, blank);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
index 7cb8c3fb2665..f6a790c49321 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -75,6 +75,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.calc_vupdate_position = dcn10_calc_vupdate_position,
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
+ .set_pipe = dce110_set_pipe,
};
static const struct hwseq_private_funcs dcn10_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 7fd385be3f3d..81db0179f7ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -619,11 +619,17 @@ bool dcn10_link_encoder_validate_dvi_output(
static bool dcn10_link_encoder_validate_hdmi_output(
const struct dcn10_link_encoder *enc10,
const struct dc_crtc_timing *crtc_timing,
+ const struct dc_edid_caps *edid_caps,
int adjusted_pix_clk_100hz)
{
enum dc_color_depth max_deep_color =
enc10->base.features.max_hdmi_deep_color;
+	// check the pixel clock against the EDID-specified max TMDS clock
+ if (edid_caps->max_tmds_clk_mhz != 0 &&
+ adjusted_pix_clk_100hz > edid_caps->max_tmds_clk_mhz * 10000)
+ return false;
+
if (max_deep_color < crtc_timing->display_color_depth)
return false;
@@ -801,6 +807,7 @@ bool dcn10_link_encoder_validate_output_with_stream(
is_valid = dcn10_link_encoder_validate_hdmi_output(
enc10,
&stream->timing,
+ &stream->sink->edid_caps,
stream->phy_pix_clk * 10);
break;
case SIGNAL_TYPE_DISPLAY_PORT:
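As a unit-conversion note for the check above: adjusted_pix_clk_100hz is expressed in units of 100 Hz, so multiplying max_tmds_clk_mhz by 10000 brings the EDID limit into the same units (1 MHz = 10000 x 100 Hz). For a hypothetical sink reporting a 340 MHz TMDS limit the threshold becomes 3,400,000; a 600 MHz pixel clock (6,000,000 in 100 Hz units) would then fail validation, while a 297 MHz clock (2,970,000) would pass.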
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index 68395bcc24fd..cf59ab0034dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -153,6 +153,12 @@ struct dcn10_link_enc_registers {
uint32_t RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_3;
uint32_t RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_2;
uint32_t RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_3;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ uint32_t TMDS_DCBALANCER_CONTROL;
+ uint32_t PHYA_LINK_CNTL2;
+ uint32_t PHYB_LINK_CNTL2;
+ uint32_t PHYC_LINK_CNTL2;
+#endif
};
#define LE_SF(reg_name, field_name, post_fix)\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index ec0ab42becba..2972392f9788 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -288,8 +288,19 @@ void optc1_program_timing(
if (optc1_is_two_pixels_per_containter(&patched_crtc_timing) || optc1->opp_count == 2)
h_div = H_TIMING_DIV_BY2;
- REG_UPDATE(OTG_H_TIMING_CNTL,
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ if (optc1->tg_mask->OTG_H_TIMING_DIV_MODE != 0) {
+ if (optc1->opp_count == 4)
+ h_div = H_TIMING_DIV_BY4;
+
+ REG_UPDATE(OTG_H_TIMING_CNTL,
+ OTG_H_TIMING_DIV_MODE, h_div);
+ } else
+#endif
+ {
+ REG_UPDATE(OTG_H_TIMING_CNTL,
OTG_H_TIMING_DIV_BY2, h_div);
+ }
}
void optc1_set_vtg_params(struct timing_generator *optc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 8d1e52fb0393..b38475285835 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -171,6 +171,15 @@ struct dcn_optc_registers {
uint32_t OPTC_DATA_FORMAT_CONTROL;
uint32_t OPTC_BYTES_PER_PIXEL;
uint32_t OPTC_WIDTH_CONTROL;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ uint32_t OTG_BLANK_DATA_COLOR;
+ uint32_t OTG_BLANK_DATA_COLOR_EXT;
+ uint32_t OTG_DRR_TRIGGER_WINDOW;
+ uint32_t OTG_M_CONST_DTO0;
+ uint32_t OTG_M_CONST_DTO1;
+ uint32_t OTG_DRR_V_TOTAL_CHANGE;
+ uint32_t OTG_GLOBAL_CONTROL4;
+#endif
};
#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
@@ -296,6 +305,8 @@ struct dcn_optc_registers {
SF(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, mask_sh),\
SF(OTG0_OTG_GLOBAL_CONTROL2, MANUAL_FLOW_CONTROL_SEL, mask_sh)
+
+
#define TG_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\
SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_INC0, mask_sh),\
@@ -385,6 +396,13 @@ struct dcn_optc_registers {
type OTG_BLACK_COLOR_B_CB;\
type OTG_BLACK_COLOR_G_Y;\
type OTG_BLACK_COLOR_R_CR;\
+ type OTG_BLANK_DATA_COLOR_BLUE_CB;\
+ type OTG_BLANK_DATA_COLOR_GREEN_Y;\
+ type OTG_BLANK_DATA_COLOR_RED_CR;\
+ type OTG_BLANK_DATA_COLOR_BLUE_CB_EXT;\
+ type OTG_BLANK_DATA_COLOR_GREEN_Y_EXT;\
+ type OTG_BLANK_DATA_COLOR_RED_CR_EXT;\
+ type OTG_VTOTAL_MID_REPLACING_MIN_EN;\
type OTG_TEST_PATTERN_INC0;\
type OTG_TEST_PATTERN_INC1;\
type OTG_TEST_PATTERN_VRES;\
@@ -456,9 +474,17 @@ struct dcn_optc_registers {
type MANUAL_FLOW_CONTROL;\
type MANUAL_FLOW_CONTROL_SEL;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define TG_REG_FIELD_LIST(type) \
TG_REG_FIELD_LIST_DCN1_0(type)\
+ type OTG_V_SYNC_MODE;\
+ type OTG_DRR_TRIGGER_WINDOW_START_X;\
+ type OTG_DRR_TRIGGER_WINDOW_END_X;\
+ type OTG_DRR_V_TOTAL_CHANGE_LIMIT;\
+ type OTG_OUT_MUX;\
+ type OTG_M_CONST_DTO_PHASE;\
+ type OTG_M_CONST_DTO_MODULO;\
type MASTER_UPDATE_LOCK_DB_X;\
type MASTER_UPDATE_LOCK_DB_Y;\
type MASTER_UPDATE_LOCK_DB_EN;\
@@ -469,6 +495,8 @@ struct dcn_optc_registers {
type OPTC_NUM_OF_INPUT_SEGMENT;\
type OPTC_SEG0_SRC_SEL;\
type OPTC_SEG1_SRC_SEL;\
+ type OPTC_SEG2_SRC_SEL;\
+ type OPTC_SEG3_SRC_SEL;\
type OPTC_MEM_SEL;\
type OPTC_DATA_FORMAT;\
type OPTC_DSC_MODE;\
@@ -477,11 +505,45 @@ struct dcn_optc_registers {
type OPTC_SEGMENT_WIDTH;\
type OPTC_DWB0_SOURCE_SELECT;\
type OPTC_DWB1_SOURCE_SELECT;\
+ type MASTER_UPDATE_LOCK_DB_START_X;\
+ type MASTER_UPDATE_LOCK_DB_END_X;\
+ type MASTER_UPDATE_LOCK_DB_START_Y;\
+ type MASTER_UPDATE_LOCK_DB_END_Y;\
+ type DIG_UPDATE_POSITION_X;\
+ type DIG_UPDATE_POSITION_Y;\
+ type OTG_H_TIMING_DIV_MODE;\
+ type OTG_DRR_TIMING_DBUF_UPDATE_MODE;\
type OTG_CRC_DSC_MODE;\
type OTG_CRC_DATA_STREAM_COMBINE_MODE;\
type OTG_CRC_DATA_STREAM_SPLIT_MODE;\
type OTG_CRC_DATA_FORMAT;
+#else
+#define TG_REG_FIELD_LIST(type) \
+ TG_REG_FIELD_LIST_DCN1_0(type)\
+ type MASTER_UPDATE_LOCK_DB_X;\
+ type MASTER_UPDATE_LOCK_DB_Y;\
+ type MASTER_UPDATE_LOCK_DB_EN;\
+ type GLOBAL_UPDATE_LOCK_EN;\
+ type DIG_UPDATE_LOCATION;\
+ type OTG_DSC_START_POSITION_X;\
+ type OTG_DSC_START_POSITION_LINE_NUM;\
+ type OPTC_NUM_OF_INPUT_SEGMENT;\
+ type OPTC_SEG0_SRC_SEL;\
+ type OPTC_SEG1_SRC_SEL;\
+ type OPTC_MEM_SEL;\
+ type OPTC_DATA_FORMAT;\
+ type OPTC_DSC_MODE;\
+ type OPTC_DSC_BYTES_PER_PIXEL;\
+ type OPTC_DSC_SLICE_WIDTH;\
+ type OPTC_SEGMENT_WIDTH;\
+ type OPTC_DWB0_SOURCE_SELECT;\
+ type OPTC_DWB1_SOURCE_SELECT;\
+ type OTG_CRC_DSC_MODE;\
+ type OTG_CRC_DATA_STREAM_COMBINE_MODE;\
+ type OTG_CRC_DATA_STREAM_SPLIT_MODE;\
+ type OTG_CRC_DATA_FORMAT;
+#endif
struct dcn_optc_shift {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index f9b9e221c698..ed385b1477be 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -169,6 +169,14 @@ struct dcn10_stream_enc_registers {
uint32_t DP_SEC_METADATA_TRANSMISSION;
uint32_t HDMI_METADATA_PACKET_CONTROL;
uint32_t DP_SEC_FRAMING4;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ uint32_t DP_GSP11_CNTL;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL6;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL7;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL8;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL9;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL10;
+#endif
uint32_t DIG_CLOCK_PATTERN;
};
@@ -483,14 +491,48 @@ struct dcn10_stream_enc_registers {
type DP_PIXEL_COMBINE;\
type DP_SST_SDP_SPLITTING
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define SE_REG_FIELD_LIST_DCN3_0(type) \
+ type HDMI_GENERIC8_CONT;\
+ type HDMI_GENERIC8_SEND;\
+ type HDMI_GENERIC8_LINE;\
+ type HDMI_GENERIC9_CONT;\
+ type HDMI_GENERIC9_SEND;\
+ type HDMI_GENERIC9_LINE;\
+ type HDMI_GENERIC10_CONT;\
+ type HDMI_GENERIC10_SEND;\
+ type HDMI_GENERIC10_LINE;\
+ type HDMI_GENERIC11_CONT;\
+ type HDMI_GENERIC11_SEND;\
+ type HDMI_GENERIC11_LINE;\
+ type HDMI_GENERIC12_CONT;\
+ type HDMI_GENERIC12_SEND;\
+ type HDMI_GENERIC12_LINE;\
+ type HDMI_GENERIC13_CONT;\
+ type HDMI_GENERIC13_SEND;\
+ type HDMI_GENERIC13_LINE;\
+ type HDMI_GENERIC14_CONT;\
+ type HDMI_GENERIC14_SEND;\
+ type HDMI_GENERIC14_LINE;\
+ type DP_SEC_GSP11_PPS;\
+ type DP_SEC_GSP11_ENABLE;\
+ type DP_SEC_GSP11_LINE_NUM
+#endif
+
struct dcn10_stream_encoder_shift {
SE_REG_FIELD_LIST_DCN1_0(uint8_t);
SE_REG_FIELD_LIST_DCN2_0(uint8_t);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ SE_REG_FIELD_LIST_DCN3_0(uint8_t);
+#endif
};
struct dcn10_stream_encoder_mask {
SE_REG_FIELD_LIST_DCN1_0(uint32_t);
SE_REG_FIELD_LIST_DCN2_0(uint32_t);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ SE_REG_FIELD_LIST_DCN3_0(uint32_t);
+#endif
};
struct dcn10_stream_encoder {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
index 2205cb0204e7..06daf35bb587 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
@@ -76,18 +76,40 @@
type REFCLK_CLOCK_EN;\
type REFCLK_SRC_SEL;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define DCCG3_REG_FIELD_LIST(type) \
+ type PHYASYMCLK_FORCE_EN;\
+ type PHYASYMCLK_FORCE_SRC_SEL;\
+ type PHYBSYMCLK_FORCE_EN;\
+ type PHYBSYMCLK_FORCE_SRC_SEL;\
+ type PHYCSYMCLK_FORCE_EN;\
+ type PHYCSYMCLK_FORCE_SRC_SEL;
+#endif
+
struct dccg_shift {
DCCG_REG_FIELD_LIST(uint8_t)
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ DCCG3_REG_FIELD_LIST(uint8_t)
+#endif
};
struct dccg_mask {
DCCG_REG_FIELD_LIST(uint32_t)
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ DCCG3_REG_FIELD_LIST(uint32_t)
+#endif
};
struct dccg_registers {
uint32_t DPPCLK_DTO_CTRL;
uint32_t DPPCLK_DTO_PARAM[6];
uint32_t REFCLK_CNTL;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ uint32_t HDMICHARCLK_CLOCK_CNTL[6];
+ uint32_t PHYASYMCLK_CLOCK_CNTL;
+ uint32_t PHYBSYMCLK_CLOCK_CNTL;
+ uint32_t PHYCSYMCLK_CLOCK_CNTL;
+#endif
};
struct dcn_dccg {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index 42bba7c9548b..4af96cc5d9d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -181,9 +181,11 @@ static void dpp2_cnv_setup (
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
pixel_format = 112;
+ alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
pixel_format = 113;
+ alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
pixel_format = 114;
@@ -199,9 +201,11 @@ static void dpp2_cnv_setup (
break;
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
pixel_format = 118;
+ alpha_en = 0;
break;
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
pixel_format = 119;
+ alpha_en = 0;
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 1b1ae9ce2799..3c6ecfe141bb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -717,22 +717,5 @@ static void dsc_write_to_registers(struct display_stream_compressor *dsc, const
RANGE_MAX_QP14, reg_vals->pps.rc_range_params[14].range_max_qp,
RANGE_BPG_OFFSET14, reg_vals->pps.rc_range_params[14].range_bpg_offset);
- if (IS_FPGA_MAXIMUS_DC(dsc20->base.ctx->dce_environment)) {
- /* It's safe to do this as long as debug bus is not being used in DAL Diag environment.
- *
- * This is because DSCC_PPS_CONFIG4.INITIAL_DEC_DELAY is a read-only register field (because it's a decoder
- * value not required by DSC encoder). However, since decoding fails when this value is missing from PPS, it's
- * required to communicate this value to the PPS header. When testing on FPGA, the values for PPS header are
- * being read from Diag register dump. The register below is used in place of a scratch register to make
- * 'initial_dec_delay' available.
- */
-
- temp_int = reg_vals->pps.initial_dec_delay;
- REG_SET_4(DSCC_TEST_DEBUG_BUS_ROTATE, 0,
- DSCC_TEST_DEBUG_BUS0_ROTATE, temp_int & 0x1f,
- DSCC_TEST_DEBUG_BUS1_ROTATE, temp_int >> 5 & 0x1f,
- DSCC_TEST_DEBUG_BUS2_ROTATE, temp_int >> 10 & 0x1f,
- DSCC_TEST_DEBUG_BUS3_ROTATE, temp_int >> 15 & 0x1);
- }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
index 9855a7ed0387..667640c4b288 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
@@ -78,7 +78,6 @@
SRI(DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\
SRI(DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\
SRI(DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_TEST_DEBUG_BUS_ROTATE, DSCC, id),\
SRI(DSCCIF_CONFIG0, DSCCIF, id),\
SRI(DSCCIF_CONFIG1, DSCCIF, id),\
SRI(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id)
@@ -95,8 +94,6 @@
DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_CLOCK_EN, mask_sh), \
DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DISPCLK_R_GATE_DIS, mask_sh), \
DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DSCCLK_R_GATE_DIS, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_DBG_EN, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_TEST_CLOCK_MUX_SEL, mask_sh), \
DSC_SF(DSCC0_DSCC_CONFIG0, ICH_RESET_AT_END_OF_LINE, mask_sh), \
DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_PER_LINE, mask_sh), \
DSC_SF(DSCC0_DSCC_CONFIG0, ALTERNATE_ICH_ENCODING_EN, mask_sh), \
@@ -249,10 +246,6 @@
DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \
DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \
DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE, DSCC_TEST_DEBUG_BUS0_ROTATE, mask_sh), \
- DSC_SF(DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE, DSCC_TEST_DEBUG_BUS1_ROTATE, mask_sh), \
- DSC_SF(DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE, DSCC_TEST_DEBUG_BUS2_ROTATE, mask_sh), \
- DSC_SF(DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE, DSCC_TEST_DEBUG_BUS3_ROTATE, mask_sh), \
DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN, mask_sh), \
DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS, mask_sh), \
@@ -427,10 +420,6 @@
type DSCC_UPDATE_PENDING_STATUS; \
type DSCC_UPDATE_TAKEN_STATUS; \
type DSCC_UPDATE_TAKEN_ACK; \
- type DSCC_TEST_DEBUG_BUS0_ROTATE; \
- type DSCC_TEST_DEBUG_BUS1_ROTATE; \
- type DSCC_TEST_DEBUG_BUS2_ROTATE; \
- type DSCC_TEST_DEBUG_BUS3_ROTATE; \
type DSCC_RATE_BUFFER0_FULLNESS_LEVEL; \
type DSCC_RATE_BUFFER1_FULLNESS_LEVEL; \
type DSCC_RATE_BUFFER2_FULLNESS_LEVEL; \
@@ -503,7 +492,6 @@ struct dcn20_dsc_registers {
uint32_t DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL;
uint32_t DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL;
uint32_t DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL;
- uint32_t DSCC_TEST_DEBUG_BUS_ROTATE;
uint32_t DSCCIF_CONFIG0;
uint32_t DSCCIF_CONFIG1;
uint32_t DSCRM_DSC_FORWARD_CONFIG;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index c0b21d7450d4..69d49551ab5d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -153,6 +153,10 @@ bool hubbub2_dcc_support_pixel_format(
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
+#endif
*bytes_per_element = 4;
return true;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
@@ -339,6 +343,11 @@ static enum dcn_hubbub_page_table_block_size page_table_block_size_to_hw(unsigne
case 65536:
block_size = DCN_PAGE_TABLE_BLOCK_SIZE_64KB;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case 32768:
+ block_size = DCN_PAGE_TABLE_BLOCK_SIZE_32KB;
+ break;
+#endif
default:
ASSERT(false);
block_size = page_table_block_size;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 84d7ac5dd206..bb920d0e0b89 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -336,6 +336,10 @@ void hubp2_program_size(
*/
use_pitch_c = format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
&& format < SURFACE_PIXEL_FORMAT_SUBSAMPLE_END;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ use_pitch_c = use_pitch_c
+ || (format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA);
+#endif
if (use_pitch_c) {
ASSERT(plane_size->chroma_pitch != 0);
/* Chroma pitch zero can cause system hang! */
@@ -360,6 +364,10 @@ void hubp2_program_size(
PITCH, pitch, META_PITCH, meta_pitch);
use_pitch_c = format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ use_pitch_c = use_pitch_c
+ || (format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA);
+#endif
if (use_pitch_c)
REG_UPDATE_2(DCSURF_SURFACE_PITCH_C,
PITCH_C, pitch_c, META_PITCH_C, meta_pitch_c);
@@ -505,6 +513,18 @@ void hubp2_program_pixel_format(
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 119);
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 116,
+ ALPHA_PLANE_EN, 0);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 116,
+ ALPHA_PLANE_EN, 1);
+ break;
+#endif
default:
BREAK_TO_DEBUGGER();
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
index 8c04a3606a54..4a2c93087459 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
@@ -157,6 +157,12 @@
uint32_t VBLANK_PARAMETERS_5;\
uint32_t VBLANK_PARAMETERS_6
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define DCN30_HUBP_REG_COMMON_VARIABLE_LIST \
+ DCN21_HUBP_REG_COMMON_VARIABLE_LIST;\
+ uint32_t DCN_DMDATA_VM_CNTL
+#endif
+
#define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \
DCN_HUBP_REG_FIELD_BASE_LIST(type); \
type DMDATA_ADDRESS_HIGH;\
@@ -192,17 +198,52 @@
type REFCYC_PER_META_CHUNK_FLIP_C; \
type VM_GROUP_SIZE
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+#define DCN30_HUBP_REG_FIELD_VARIABLE_LIST(type) \
+ DCN21_HUBP_REG_FIELD_VARIABLE_LIST(type);\
+ type PRIMARY_SURFACE_DCC_IND_BLK;\
+ type SECONDARY_SURFACE_DCC_IND_BLK;\
+ type PRIMARY_SURFACE_DCC_IND_BLK_C;\
+ type SECONDARY_SURFACE_DCC_IND_BLK_C;\
+ type ALPHA_PLANE_EN;\
+ type REFCYC_PER_VM_DMDATA;\
+ type DMDATA_VM_FAULT_STATUS;\
+ type DMDATA_VM_FAULT_STATUS_CLEAR; \
+ type DMDATA_VM_UNDERFLOW_STATUS;\
+ type DMDATA_VM_LATE_STATUS;\
+ type DMDATA_VM_UNDERFLOW_STATUS_CLEAR; \
+ type DMDATA_VM_DONE; \
+ type CROSSBAR_SRC_Y_G; \
+ type CROSSBAR_SRC_ALPHA; \
+ type PACK_3TO2_ELEMENT_DISABLE; \
+ type ROW_TTU_MODE; \
+ type NUM_PKRS
+#endif
struct dcn_hubp2_registers {
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ DCN30_HUBP_REG_COMMON_VARIABLE_LIST;
+#else
DCN21_HUBP_REG_COMMON_VARIABLE_LIST;
+#endif
};
struct dcn_hubp2_shift {
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ DCN30_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t);
+#else
DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t);
+#endif
+
};
struct dcn_hubp2_mask {
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ DCN30_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t);
+#else
DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t);
+#endif
+
};
struct dcn20_hubp {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index da5333d165ac..5621c95177d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -49,6 +49,8 @@
#include "dc_link_dp.h"
#include "vm_helper.h"
#include "dccg.h"
+#include "dc_dmub_srv.h"
+#include "dce/dmub_hw_lock_mgr.h"
#define DC_LOGGER_INIT(logger)
@@ -291,12 +293,20 @@ void dcn20_init_blank(
/* get the OPTC source */
tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
- ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
+
+ if (opp_id_src0 >= dc->res_pool->res_cap->num_opp) {
+ ASSERT(false);
+ return;
+ }
opp = dc->res_pool->opps[opp_id_src0];
if (num_opps == 2) {
otg_active_width = otg_active_width / 2;
- ASSERT(opp_id_src1 < dc->res_pool->res_cap->num_opp);
+
+ if (opp_id_src1 >= dc->res_pool->res_cap->num_opp) {
+ ASSERT(false);
+ return;
+ }
bottom_opp = dc->res_pool->opps[opp_id_src1];
}
@@ -601,6 +611,31 @@ void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
pipe_ctx->pipe_idx);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
+ int opp_cnt)
+{
+ bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
+ int flow_ctrl_cnt;
+
+ if (opp_cnt >= 2)
+ hblank_halved = true;
+
+ flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
+ stream->timing.h_border_left -
+ stream->timing.h_border_right;
+
+ if (hblank_halved)
+ flow_ctrl_cnt /= 2;
+
+ /* ODM combine 4:1 case */
+ if (opp_cnt == 4)
+ flow_ctrl_cnt /= 2;
+
+ return flow_ctrl_cnt;
+}
+#endif
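A worked example for the helper above, using assumed numbers that are not taken from the patch: for a timing with h_total = 4400, h_addressable = 3840 and no borders, the raw count is 4400 - 3840 = 560. With ODM 2:1 (opp_cnt = 2), hblank_halved is forced on, giving 560 / 2 = 280; with ODM 4:1 (opp_cnt = 4), the additional divide-by-two yields 140.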
+
enum dc_status dcn20_enable_stream_timing(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
@@ -614,6 +649,15 @@ enum dc_status dcn20_enable_stream_timing(
int opp_cnt = 1;
int opp_inst[MAX_PIPES] = { pipe_ctx->stream_res.opp->inst };
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ bool interlace = stream->timing.flags.INTERLACE;
+ int i;
+
+ struct mpc_dwb_flow_control flow_control;
+ struct mpc *mpc = dc->res_pool->mpc;
+ bool rate_control_2x_pclk = (interlace || optc2_is_two_pixels_per_containter(&stream->timing));
+
+#endif
/* by upper caller loop, pipe0 is parent pipe and be called first.
* back end is set up by for pipe0. Other children pipe share back end
* with pipe 0. No program is needed.
@@ -660,6 +704,21 @@ enum dc_status dcn20_enable_stream_timing(
pipe_ctx->stream->signal,
true);
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
+ flow_control.flow_ctrl_mode = 0;
+ flow_control.flow_ctrl_cnt0 = 0x80;
+ flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(stream, opp_cnt);
+ if (mpc->funcs->set_out_rate_control) {
+ for (i = 0; i < opp_cnt; ++i) {
+ mpc->funcs->set_out_rate_control(
+ mpc, opp_inst[i],
+ true,
+ rate_control_2x_pclk,
+ &flow_control);
+ }
+ }
+#endif
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
@@ -996,8 +1055,7 @@ void dcn20_blank_pixel_data(
if (!blank)
if (stream_res->abm) {
- stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1,
- stream->link->panel_cntl->inst);
+ dc->hwss.set_pipe(pipe_ctx);
stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
}
}
@@ -1138,7 +1196,21 @@ void dcn20_pipe_control_lock(
(!flip_immediate && pipe->stream_res.gsl_group > 0))
dcn20_setup_gsl_group_as_lock(dc, pipe, flip_immediate);
- if (pipe->plane_state != NULL && pipe->plane_state->triplebuffer_flips) {
+ if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
+ union dmub_hw_lock_flags hw_locks = { 0 };
+ struct dmub_hw_lock_inst_flags inst_flags = { 0 };
+
+ hw_locks.bits.lock_pipe = 1;
+ inst_flags.otg_inst = pipe->stream_res.tg->inst;
+
+ if (pipe->plane_state != NULL)
+ hw_locks.bits.triple_buffer_lock = pipe->plane_state->triplebuffer_flips;
+
+ dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
+ lock,
+ &hw_locks,
+ &inst_flags);
+ } else if (pipe->plane_state != NULL && pipe->plane_state->triplebuffer_flips) {
if (lock)
pipe->stream_res.tg->funcs->triplebuffer_lock(pipe->stream_res.tg);
else
@@ -1209,14 +1281,13 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
new_pipe->update_flags.bits.tg_changed = 1;
- /* Detect mpcc blending changes, only dpp inst and bot matter here */
+ /*
+	 * Detect mpcc blending changes; only the dpp inst and opp matter here.
+	 * Mpccs that get removed or inserted update the connected ones during
+	 * their own programming.
+ */
if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
- || old_pipe->stream_res.opp != new_pipe->stream_res.opp
- || (!old_pipe->bottom_pipe && new_pipe->bottom_pipe)
- || (old_pipe->bottom_pipe && !new_pipe->bottom_pipe)
- || (old_pipe->bottom_pipe && new_pipe->bottom_pipe
- && old_pipe->bottom_pipe->plane_res.mpcc_inst
- != new_pipe->bottom_pipe->plane_res.mpcc_inst))
+ || old_pipe->stream_res.opp != new_pipe->stream_res.opp)
new_pipe->update_flags.bits.mpcc = 1;
/* Detect dppclk change */
@@ -1400,6 +1471,37 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
|| pipe_ctx->stream->update_flags.bits.gamut_remap
|| pipe_ctx->stream->update_flags.bits.out_csc) {
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
+
+ if (mpc->funcs->set_gamut_remap) {
+ int i;
+ int mpcc_id = hubp->inst;
+ struct mpc_grph_gamut_adjustment adjust;
+ bool enable_remap_dpp = false;
+
+ memset(&adjust, 0, sizeof(adjust));
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+			/* save the enablement of gamut remap for the dpp */
+			enable_remap_dpp = pipe_ctx->stream->gamut_remap_matrix.enable_remap;
+			/* force bypass of gamut remap in the dpp/cm */
+			pipe_ctx->stream->gamut_remap_matrix.enable_remap = false;
+			dc->hwss.program_gamut_remap(pipe_ctx);
+			/* restore the gamut remap flag for the top plane and apply this remap in the mpc */
+ if (pipe_ctx->top_pipe == NULL)
+ pipe_ctx->stream->gamut_remap_matrix.enable_remap = enable_remap_dpp;
+ else
+ pipe_ctx->stream->gamut_remap_matrix.enable_remap = false;
+
+ if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
+ adjust.temperature_matrix[i] =
+ pipe_ctx->stream->gamut_remap_matrix.matrix[i];
+ }
+ mpc->funcs->set_gamut_remap(mpc, mpcc_id, &adjust);
+ } else
+#endif
/* dpp/cm gamut remap*/
dc->hwss.program_gamut_remap(pipe_ctx);
@@ -1436,9 +1538,6 @@ static void dcn20_update_dchubp_dpp(
hubp->power_gated = false;
}
- if (hubp->funcs->apply_PLAT_54186_wa && viewport_changed)
- hubp->funcs->apply_PLAT_54186_wa(hubp, &plane_state->address);
-
if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update)
hws->funcs.update_plane_addr(dc, pipe_ctx);
@@ -2162,6 +2261,11 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
blnd_cfg.bottom_inside_gain = 0x1f000;
blnd_cfg.bottom_outside_gain = 0x1f000;
blnd_cfg.pre_multiplied_alpha = per_pixel_alpha;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ if (pipe_ctx->plane_state->format
+ == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA)
+ blnd_cfg.pre_multiplied_alpha = false;
+#endif
/*
* TODO: remove hack
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index 2fbde4241559..bb9e9bec2f28 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -86,6 +86,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.calc_vupdate_position = dcn10_calc_vupdate_position,
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
+ .set_pipe = dce110_set_pipe,
};
static const struct hwseq_private_funcs dcn20_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
index 284a1ee4d249..db09f40075c2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
@@ -147,8 +147,7 @@
LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_SWAP, mask_sh),\
LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT, mask_sh),\
LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_EN, mask_sh),\
- LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_RD_START_DELAY, mask_sh),\
- LE_SF(DPCSTX0_DPCSTX_DEBUG_CONFIG, DPCS_DBG_CBUS_DIS, mask_sh)
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_FIFO_RD_START_DELAY, mask_sh)
#define DPCS_DCN2_MASK_SH_LIST(mask_sh)\
DPCS_MASK_SH_LIST(mask_sh),\
@@ -274,6 +273,10 @@ struct mpll_cfg {
bool dp_tx1_vergdrv_byp;
bool dp_tx2_vergdrv_byp;
bool dp_tx3_vergdrv_byp;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ uint32_t tx_peaking_lvl;
+ uint32_t ctr_reqs_pll;
+#endif
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index cef1aa938ab5..0983bcc25117 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -157,7 +157,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = {
.number_of_cursors = 1,
};
-struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
+static struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
.odm_capable = 1,
.gpuvm_enable = 0,
.hostvm_enable = 0,
@@ -226,7 +226,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
.number_of_cursors = 1,
};
-struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
/* Defaults that get patched on driver load from firmware. */
.clock_limits = {
{
@@ -338,7 +338,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
.use_urgent_burst_bw = 0
};
-struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
+static struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
.clock_limits = {
{
.state = 0,
@@ -449,7 +449,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
.use_urgent_burst_bw = 0
};
-struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
+static struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
@@ -1323,7 +1323,7 @@ static struct panel_cntl *dcn20_panel_cntl_create(const struct panel_cntl_init_d
return &panel_cntl->base;
}
-struct clock_source *dcn20_clock_source_create(
+static struct clock_source *dcn20_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
@@ -2015,6 +2015,10 @@ int dcn20_populate_dml_pipes_from_context(
pipe_cnt = i;
continue;
}
+
+ if (res_ctx->pipe_ctx[pipe_cnt].stream == res_ctx->pipe_ctx[i].stream)
+ continue;
+
if (dc->debug.disable_timing_sync || !resource_are_streams_timing_synchronizable(
res_ctx->pipe_ctx[pipe_cnt].stream,
res_ctx->pipe_ctx[i].stream)) {
@@ -2029,6 +2033,9 @@ int dcn20_populate_dml_pipes_from_context(
unsigned int front_porch;
int output_bpc;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ struct audio_check aud_check = {0};
+#endif
if (!res_ctx->pipe_ctx[i].stream)
continue;
@@ -2083,6 +2090,11 @@ int dcn20_populate_dml_pipes_from_context(
case 1:
pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_2to1;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case 3:
+ pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_4to1;
+ break;
+#endif
default:
pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_disabled;
}
@@ -2179,6 +2191,11 @@ int dcn20_populate_dml_pipes_from_context(
/* todo: default max for now, until there is logic reflecting this in dc*/
pipes[pipe_cnt].dout.output_bpc = 12;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+		/* fill in the audio sample rate */
+ get_audio_check(&res_ctx->pipe_ctx[i].stream->audio_info, &aud_check);
+ pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate;
+#endif
/*
* For graphic plane, cursor number is 1, nv12 is 0
* bw calculations due to cursor on/off
@@ -2226,6 +2243,12 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.viewport_width /= 2;
pipes[pipe_cnt].pipe.dest.recout_width /= 2;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ else if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_4to1) {
+ pipes[pipe_cnt].pipe.src.viewport_width /= 4;
+ pipes[pipe_cnt].pipe.dest.recout_width /= 4;
+ }
+#endif
} else {
struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state;
struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data;
@@ -2234,6 +2257,14 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln)
|| (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln)
|| pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
+
+			/* stereo is never split, nor ODM combined */
+ if (pln->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE ||
+ pln->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM) {
+ pipes[pipe_cnt].pipe.src.is_hsplit = false;
+ pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx;
+ }
+
pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
|| pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport_unadjusted.y;
@@ -2246,7 +2277,12 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.surface_height_y = pln->plane_size.surface_size.height;
pipes[pipe_cnt].pipe.src.surface_width_c = pln->plane_size.chroma_size.width;
pipes[pipe_cnt].pipe.src.surface_height_c = pln->plane_size.chroma_size.height;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ if (pln->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA
+ || pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+#else
if (pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+#endif
pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch;
pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.chroma_pitch;
pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.meta_pitch;
@@ -2262,6 +2298,10 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width;
if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_2to1)
pipes[pipe_cnt].pipe.dest.full_recout_width *= 2;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ else if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_4to1)
+ pipes[pipe_cnt].pipe.dest.full_recout_width *= 4;
+#endif
else {
struct pipe_ctx *split_pipe = res_ctx->pipe_ctx[i].bottom_pipe;
@@ -2318,6 +2358,11 @@ int dcn20_populate_dml_pipes_from_context(
case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
pipes[pipe_cnt].pipe.src.source_format = dm_444_8;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
+ pipes[pipe_cnt].pipe.src.source_format = dm_rgbe_alpha;
+ break;
+#endif
default:
pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
break;
@@ -2629,6 +2674,23 @@ int dcn20_validate_apply_pipe_split_flags(
if (plane_count > dc->res_pool->pipe_count / 2)
avoid_split = true;
+	/* W/A: mode timings with borders may not work well with pipe split; avoid it for this corner case */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct dc_crtc_timing timing;
+
+ if (!pipe->stream)
+ continue;
+ else {
+ timing = pipe->stream->timing;
+ if (timing.h_border_left + timing.h_border_right
+ + timing.v_border_top + timing.v_border_bottom > 0) {
+ avoid_split = true;
+ break;
+ }
+ }
+ }
+
/* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */
if (avoid_split) {
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
@@ -2678,6 +2740,17 @@ int dcn20_validate_apply_pipe_split_flags(
split[i] = 2;
v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ if (dc->debug.force_odm_combine_4to1 & (1 << pipe->stream_res.tg->inst)) {
+ split[i] = 4;
+ v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_4to1;
+ }
+			/* 4:2:0 format workaround */
+ if (pipe->stream->timing.h_addressable > 7680 &&
+ pipe->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ split[i] = 4;
+ }
+#endif
v->ODMCombineEnabled[pipe_plane] =
v->ODMCombineEnablePerState[vlevel][pipe_plane];
@@ -2836,8 +2909,8 @@ bool dcn20_fast_validate_bw(
dcn20_split_stream_for_mpc(
&context->res_ctx, dc->res_pool,
pipe, hsplit_pipe);
- if (!resource_build_scaling_params(pipe) || !resource_build_scaling_params(hsplit_pipe))
- goto validate_fail;
+ resource_build_scaling_params(pipe);
+ resource_build_scaling_params(hsplit_pipe);
}
pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
}
@@ -4053,8 +4126,12 @@ static bool dcn20_resource_construct(
// to be consumed. We could have created dcn20_init_hw to get
// the same effect by checking ASIC rev, but there was a
// request at some point to not check ASIC rev on hw sequencer.
- if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev))
+ if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {
dc->hwseq->funcs.enable_power_gating_plane = NULL;
+ dc->debug.disable_dpp_power_gate = true;
+ dc->debug.disable_hubp_power_gate = true;
+ }
+
dc->caps.max_planes = pool->base.pipe_count;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index 960a0716dde5..f9045852728f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -225,116 +225,6 @@ void hubp21_set_viewport(
SEC_VIEWPORT_Y_START_C, viewport_c->y);
}
-static void hubp21_apply_PLAT_54186_wa(
- struct hubp *hubp,
- const struct dc_plane_address *address)
-{
- struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
- struct dc_debug_options *debug = &hubp->ctx->dc->debug;
- unsigned int chroma_bpe = 2;
- unsigned int luma_addr_high_part = 0;
- unsigned int row_height = 0;
- unsigned int chroma_pitch = 0;
- unsigned int viewport_c_height = 0;
- unsigned int viewport_c_width = 0;
- unsigned int patched_viewport_height = 0;
- unsigned int patched_viewport_width = 0;
- unsigned int rotation_angle = 0;
- unsigned int pix_format = 0;
- unsigned int h_mirror_en = 0;
- unsigned int tile_blk_size = 64 * 1024; /* 64KB for 64KB SW, 4KB for 4KB SW */
-
-
- if (!debug->nv12_iflip_vm_wa)
- return;
-
- REG_GET(DCHUBP_REQ_SIZE_CONFIG_C,
- PTE_ROW_HEIGHT_LINEAR_C, &row_height);
-
- REG_GET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C,
- PRI_VIEWPORT_WIDTH_C, &viewport_c_width,
- PRI_VIEWPORT_HEIGHT_C, &viewport_c_height);
-
- REG_GET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C,
- PRIMARY_SURFACE_ADDRESS_HIGH_C, &luma_addr_high_part);
-
- REG_GET(DCSURF_SURFACE_PITCH_C,
- PITCH_C, &chroma_pitch);
-
- chroma_pitch += 1;
-
- REG_GET_3(DCSURF_SURFACE_CONFIG,
- SURFACE_PIXEL_FORMAT, &pix_format,
- ROTATION_ANGLE, &rotation_angle,
- H_MIRROR_EN, &h_mirror_en);
-
- /* reset persistent cached data */
- hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
- /* apply wa only for NV12 surface with scatter gather enabled with viewport > 512 along
- * the vertical direction*/
- if (address->type != PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
- address->video_progressive.luma_addr.high_part == 0xf4)
- return;
-
- if ((rotation_angle == ROTATION_ANGLE_0 || rotation_angle == ROTATION_ANGLE_180)
- && viewport_c_height <= 512)
- return;
-
- if ((rotation_angle == ROTATION_ANGLE_90 || rotation_angle == ROTATION_ANGLE_270)
- && viewport_c_width <= 512)
- return;
-
- switch (rotation_angle) {
- case ROTATION_ANGLE_0: /* 0 degree rotation */
- row_height = 128;
- patched_viewport_height = (viewport_c_height / row_height + 1) * row_height + 1;
- patched_viewport_width = viewport_c_width;
- hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
- break;
- case ROTATION_ANGLE_180: /* 180 degree rotation */
- row_height = 128;
- patched_viewport_height = viewport_c_height + row_height;
- patched_viewport_width = viewport_c_width;
- hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - chroma_pitch * row_height * chroma_bpe;
- break;
- case ROTATION_ANGLE_90: /* 90 degree rotation */
- row_height = 256;
- if (h_mirror_en) {
- patched_viewport_height = viewport_c_height;
- patched_viewport_width = viewport_c_width + row_height;
- hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
- } else {
- patched_viewport_height = viewport_c_height;
- patched_viewport_width = viewport_c_width + row_height;
- hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - tile_blk_size;
- }
- break;
- case ROTATION_ANGLE_270: /* 270 degree rotation */
- row_height = 256;
- if (h_mirror_en) {
- patched_viewport_height = viewport_c_height;
- patched_viewport_width = viewport_c_width + row_height;
- hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - tile_blk_size;
- } else {
- patched_viewport_height = viewport_c_height;
- patched_viewport_width = viewport_c_width + row_height;
- hubp21->PLAT_54186_wa_chroma_addr_offset = 0;
- }
- break;
- default:
- ASSERT(0);
- break;
- }
-
- /* catch cases where viewport keep growing */
- ASSERT(patched_viewport_height && patched_viewport_height < 5000);
- ASSERT(patched_viewport_width && patched_viewport_width < 5000);
-
- REG_UPDATE_2(DCSURF_PRI_VIEWPORT_DIMENSION_C,
- PRI_VIEWPORT_WIDTH_C, patched_viewport_width,
- PRI_VIEWPORT_HEIGHT_C, patched_viewport_height);
-}
-
void hubp21_set_vm_system_aperture_settings(struct hubp *hubp,
struct vm_system_aperture_param *apt)
{
@@ -812,8 +702,6 @@ bool hubp21_program_surface_flip_and_addr(
const struct dc_plane_address *address,
bool flip_immediate)
{
- struct dc_debug_options *debug = &hubp->ctx->dc->debug;
- struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
struct surface_flip_registers flip_regs = { 0 };
flip_regs.vmid = address->vmid;
@@ -859,12 +747,8 @@ bool hubp21_program_surface_flip_and_addr(
flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH =
address->video_progressive.luma_addr.high_part;
- if (debug->nv12_iflip_vm_wa) {
- flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_C =
- address->video_progressive.chroma_addr.low_part + hubp21->PLAT_54186_wa_chroma_addr_offset;
- } else
- flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_C =
- address->video_progressive.chroma_addr.low_part;
+ flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_C =
+ address->video_progressive.chroma_addr.low_part;
flip_regs.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C =
address->video_progressive.chroma_addr.high_part;
@@ -942,7 +826,6 @@ static struct hubp_funcs dcn21_hubp_funcs = {
.set_blank = hubp1_set_blank,
.dcc_control = hubp1_dcc_control,
.mem_program_viewport = hubp21_set_viewport,
- .apply_PLAT_54186_wa = hubp21_apply_PLAT_54186_wa,
.set_cursor_attributes = hubp2_cursor_set_attributes,
.set_cursor_position = hubp1_cursor_set_position,
.hubp_clk_cntl = hubp1_clk_cntl,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
index ada65b1a7eb1..01f1d3d9a639 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -28,10 +28,13 @@
#include "core_types.h"
#include "resource.h"
#include "dce/dce_hwseq.h"
+#include "dce110/dce110_hw_sequencer.h"
#include "dcn21_hwseq.h"
#include "vmid.h"
#include "reg_helper.h"
#include "hw/clk_mgr.h"
+#include "dc_dmub_srv.h"
+#include "abm.h"
#define DC_LOGGER_INIT(logger)
@@ -134,3 +137,89 @@ void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx)
pipe_ctx->stream->dpms_off = true;
}
+static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+ uint32_t ramping_boundary = 0xFFFF;
+
+ cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE;
+ cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst;
+ cmd.abm_set_pipe.abm_set_pipe_data.set_pipe_option = option;
+ cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst;
+ cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
+ cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ return true;
+}
+
+void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
+{
+ struct abm *abm = pipe_ctx->stream_res.abm;
+ uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
+ struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
+
+ struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu;
+
+ if (dmcu) {
+ dce110_set_abm_immediate_disable(pipe_ctx);
+ return;
+ }
+
+ if (abm && panel_cntl)
+ dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE,
+ panel_cntl->inst);
+}
+
+void dcn21_set_pipe(struct pipe_ctx *pipe_ctx)
+{
+ struct abm *abm = pipe_ctx->stream_res.abm;
+ uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
+ struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
+ struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu;
+
+ if (dmcu) {
+ dce110_set_pipe(pipe_ctx);
+ return;
+ }
+
+ if (abm && panel_cntl)
+ dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+}
+
+bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = pipe_ctx->stream->ctx;
+ struct abm *abm = pipe_ctx->stream_res.abm;
+ uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
+ struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
+
+ if (dc->dc->res_pool->dmcu) {
+ dce110_set_backlight_level(pipe_ctx, backlight_pwm_u16_16, frame_ramp);
+ return true;
+ }
+
+ if (abm && panel_cntl)
+ dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+
+ cmd.abm_set_backlight.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
+ cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp;
+ cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_pwm_u16_16;
+ cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ return true;
+}
+
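The new ABM helpers reuse the same DMUB submission sequence as the hardware lock manager earlier in this series: fill a union dmub_rb_cmd, then queue, execute and wait for idle. A minimal sketch of that shared sequence, factored out purely for illustration (the example_* helper does not exist in the patch):

    /* Illustrative only: the queue/execute/wait pattern used by
     * dmub_hw_lock_mgr_cmd(), dmub_abm_set_pipe() and dcn21_set_backlight_level().
     */
    static void example_submit_dmub_cmd(struct dc_context *ctx, union dmub_rb_cmd *cmd)
    {
    	dc_dmub_srv_cmd_queue(ctx->dmub_srv, cmd);   /* place the command in the ring buffer */
    	dc_dmub_srv_cmd_execute(ctx->dmub_srv);      /* tell the DMUB firmware to process it */
    	dc_dmub_srv_wait_idle(ctx->dmub_srv);        /* block until the firmware is done */
    }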
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
index 26bf24d3b59f..9e97747e57cd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
@@ -47,4 +47,10 @@ void dcn21_optimize_pwr_state(
void dcn21_PLAT_58856_wa(struct dc_state *context,
struct pipe_ctx *pipe_ctx);
+void dcn21_set_pipe(struct pipe_ctx *pipe_ctx);
+void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx);
+bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp);
+
#endif /* __DC_HWSS_DCN21_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index a5baef7e7a7d..8575de1a8ad2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -88,8 +88,9 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
.power_down = dce110_power_down,
- .set_backlight_level = dce110_set_backlight_level,
- .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
+ .set_backlight_level = dcn21_set_backlight_level,
+ .set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
+ .set_pipe = dcn21_set_pipe,
};
static const struct hwseq_private_funcs dcn21_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index f00a56835084..61b337267a72 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -88,7 +88,6 @@
#include "dce/dmub_psr.h"
#include "dce/dmub_abm.h"
-#define SOC_BOUNDING_BOX_VALID false
#define DC_LOGGER_INIT(logger)
@@ -878,7 +877,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.scl_reset_length10 = true,
.sanity_checks = true,
.disable_48mhz_pwrdwn = false,
- .nv12_iflip_vm_wa = true,
.usbc_combo_phy_reset_wa = true
};
@@ -1908,6 +1906,8 @@ static bool dcn21_resource_construct(
BREAK_TO_DEBUGGER();
goto create_fail;
}
+
+ dc->debug.dmub_command_table = false;
}
if (dc->config.disable_dmcu) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
new file mode 100644
index 000000000000..025637a83c3b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
@@ -0,0 +1,54 @@
+#
+# Copyright 2020 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Authors: AMD
+#
+#
+
+
+DCN30 = dcn30_init.o dcn30_hubbub.o dcn30_hubp.o dcn30_dpp.o dcn30_optc.o \
+ dcn30_dccg.o dcn30_hwseq.o dcn30_mpc.o dcn30_vpg.o \
+ dcn30_afmt.o dcn30_dio_stream_encoder.o dcn30_dwb.o \
+ dcn30_dpp_cm.o dcn30_dwb_cm.o dcn30_cm_common.o dcn30_mmhubbub.o \
+ dcn30_dio_link_encoder.o dcn30_resource.o
+
+
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -msse -mpreferred-stack-boundary=4
+
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -msse
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
+
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o += -mpreferred-stack-boundary=4
+else
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o += -msse2
+endif
+
+AMD_DAL_DCN30 = $(addprefix $(AMDDALPATH)/dc/dcn30/,$(DCN30))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCN30)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c
new file mode 100644
index 000000000000..2b08b1d72177
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dc_bios_types.h"
+#include "dcn30_afmt.h"
+#include "reg_helper.h"
+
+#define DC_LOGGER \
+ afmt3->base.ctx->logger
+
+#define REG(reg)\
+ (afmt3->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ afmt3->afmt_shift->field_name, afmt3->afmt_mask->field_name
+
+
+#define CTX \
+ afmt3->base.ctx
+
+
+static void afmt3_setup_hdmi_audio(
+ struct afmt *afmt)
+{
+ struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
+
+ /* AFMT_AUDIO_PACKET_CONTROL */
+ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
+
+ /* AFMT_AUDIO_PACKET_CONTROL2 */
+ REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2,
+ AFMT_AUDIO_LAYOUT_OVRD, 0,
+ AFMT_60958_OSF_OVRD, 0);
+
+ /* AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK &
+ * AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK
+ */
+ REG_UPDATE_2(AFMT_60958_0,
+ AFMT_60958_CS_CHANNEL_NUMBER_L, 1,
+ AFMT_60958_CS_CLOCK_ACCURACY, 0);
+
+	/* AFMT_60958_1 AFMT_60958_CS_CHANNEL_NUMBER_R */
+ REG_UPDATE(AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
+
+	/* AFMT_60958_2: keep these settings until the
+	 * programming guide comes out
+	 */
+ REG_UPDATE_6(AFMT_60958_2,
+ AFMT_60958_CS_CHANNEL_NUMBER_2, 3,
+ AFMT_60958_CS_CHANNEL_NUMBER_3, 4,
+ AFMT_60958_CS_CHANNEL_NUMBER_4, 5,
+ AFMT_60958_CS_CHANNEL_NUMBER_5, 6,
+ AFMT_60958_CS_CHANNEL_NUMBER_6, 7,
+ AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
+}
+
+static union audio_cea_channels speakers_to_channels(
+ struct audio_speaker_flags speaker_flags)
+{
+ union audio_cea_channels cea_channels = {0};
+
+ /* these are one to one */
+ cea_channels.channels.FL = speaker_flags.FL_FR;
+ cea_channels.channels.FR = speaker_flags.FL_FR;
+ cea_channels.channels.LFE = speaker_flags.LFE;
+ cea_channels.channels.FC = speaker_flags.FC;
+
+	/* if Rear Left and Right exist, move the RC speaker to channel 7,
+	 * otherwise to channel 5
+	 */
+ if (speaker_flags.RL_RR) {
+ cea_channels.channels.RL_RC = speaker_flags.RL_RR;
+ cea_channels.channels.RR = speaker_flags.RL_RR;
+ cea_channels.channels.RC_RLC_FLC = speaker_flags.RC;
+ } else {
+ cea_channels.channels.RL_RC = speaker_flags.RC;
+ }
+
+ /* FRONT Left Right Center and REAR Left Right Center are exclusive */
+ if (speaker_flags.FLC_FRC) {
+ cea_channels.channels.RC_RLC_FLC = speaker_flags.FLC_FRC;
+ cea_channels.channels.RRC_FRC = speaker_flags.FLC_FRC;
+ } else {
+ cea_channels.channels.RC_RLC_FLC = speaker_flags.RLC_RRC;
+ cea_channels.channels.RRC_FRC = speaker_flags.RLC_RRC;
+ }
+
+ return cea_channels;
+}
+
+static void afmt3_se_audio_setup(
+ struct afmt *afmt,
+ unsigned int az_inst,
+ struct audio_info *audio_info)
+{
+ struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
+
+ uint32_t speakers = 0;
+ uint32_t channels = 0;
+
+ ASSERT(audio_info);
+	/* This should not happen, but guard against it so we don't crash */
+ if (audio_info == NULL)
+ return;
+
+ speakers = audio_info->flags.info.ALLSPEAKERS;
+ channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
+
+ /* setup the audio stream source select (audio -> dig mapping) */
+ REG_SET(AFMT_AUDIO_SRC_CONTROL, 0, AFMT_AUDIO_SRC_SELECT, az_inst);
+
+ /* Channel allocation */
+ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, channels);
+
+ /* Disable forced mem power off */
+ REG_UPDATE(AFMT_MEM_PWR, AFMT_MEM_PWR_FORCE, 0);
+}
+
+static void afmt3_audio_mute_control(
+ struct afmt *afmt,
+ bool mute)
+{
+ struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
+
+ /* enable/disable transmission of audio packets */
+ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, !mute);
+}
+
+static void afmt3_audio_info_immediate_update(
+ struct afmt *afmt)
+{
+ struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
+
+ /* update double-buffered AUDIO_INFO registers immediately */
+ REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
+}
+
+static void afmt3_setup_dp_audio(
+ struct afmt *afmt)
+{
+ struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
+
+ /* AFMT_AUDIO_PACKET_CONTROL */
+ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
+
+ /* AFMT_AUDIO_PACKET_CONTROL2 */
+ /* Program the ATP and AIP next */
+ REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2,
+ AFMT_AUDIO_LAYOUT_OVRD, 0,
+ AFMT_60958_OSF_OVRD, 0);
+
+ /* AFMT_INFOFRAME_CONTROL0 */
+ REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
+
+ /* AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK */
+ REG_UPDATE(AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, 0);
+}
+
+static struct afmt_funcs dcn30_afmt_funcs = {
+ .setup_hdmi_audio = afmt3_setup_hdmi_audio,
+ .se_audio_setup = afmt3_se_audio_setup,
+ .audio_mute_control = afmt3_audio_mute_control,
+ .audio_info_immediate_update = afmt3_audio_info_immediate_update,
+ .setup_dp_audio = afmt3_setup_dp_audio,
+};
+
+void afmt3_construct(struct dcn30_afmt *afmt3,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn30_afmt_registers *afmt_regs,
+ const struct dcn30_afmt_shift *afmt_shift,
+ const struct dcn30_afmt_mask *afmt_mask)
+{
+ afmt3->base.ctx = ctx;
+
+ afmt3->base.inst = inst;
+ afmt3->base.funcs = &dcn30_afmt_funcs;
+
+ afmt3->regs = afmt_regs;
+ afmt3->afmt_shift = afmt_shift;
+ afmt3->afmt_mask = afmt_mask;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h
new file mode 100644
index 000000000000..08b2d8a8170c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCN30_AFMT_H__
+#define __DAL_DCN30_AFMT_H__
+
+
+#define DCN30_AFMT_FROM_AFMT(afmt)\
+ container_of(afmt, struct dcn30_afmt, base)
+
+#define AFMT_DCN3_REG_LIST(id) \
+ SRI(AFMT_INFOFRAME_CONTROL0, AFMT, id), \
+ SRI(AFMT_VBI_PACKET_CONTROL, AFMT, id), \
+ SRI(AFMT_AUDIO_PACKET_CONTROL, AFMT, id), \
+ SRI(AFMT_AUDIO_PACKET_CONTROL2, AFMT, id), \
+ SRI(AFMT_AUDIO_SRC_CONTROL, AFMT, id), \
+ SRI(AFMT_60958_0, AFMT, id), \
+ SRI(AFMT_60958_1, AFMT, id), \
+ SRI(AFMT_60958_2, AFMT, id), \
+ SRI(AFMT_MEM_PWR, AFMT, id)
+
+struct dcn30_afmt_registers {
+ uint32_t AFMT_INFOFRAME_CONTROL0;
+ uint32_t AFMT_VBI_PACKET_CONTROL;
+ uint32_t AFMT_AUDIO_PACKET_CONTROL;
+ uint32_t AFMT_AUDIO_PACKET_CONTROL2;
+ uint32_t AFMT_AUDIO_SRC_CONTROL;
+ uint32_t AFMT_60958_0;
+ uint32_t AFMT_60958_1;
+ uint32_t AFMT_60958_2;
+ uint32_t AFMT_MEM_PWR;
+};
+
+#define DCN3_AFMT_MASK_SH_LIST(mask_sh)\
+ SE_SF(AFMT0_AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_LAYOUT_OVRD, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_60958_OSF_OVRD, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, mask_sh),\
+ SE_SF(AFMT0_AFMT_MEM_PWR, AFMT_MEM_PWR_FORCE, mask_sh)
+
+#define AFMT_DCN3_REG_FIELD_LIST(type) \
+ type AFMT_AUDIO_INFO_UPDATE;\
+ type AFMT_AUDIO_SRC_SELECT;\
+ type AFMT_AUDIO_CHANNEL_ENABLE;\
+ type AFMT_60958_CS_UPDATE;\
+ type AFMT_AUDIO_LAYOUT_OVRD;\
+ type AFMT_60958_OSF_OVRD;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_L;\
+ type AFMT_60958_CS_CLOCK_ACCURACY;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_R;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_2;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_3;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_4;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_5;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_6;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_7;\
+ type AFMT_AUDIO_SAMPLE_SEND;\
+ type AFMT_MEM_PWR_FORCE
+
+struct dcn30_afmt_shift {
+ AFMT_DCN3_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn30_afmt_mask {
+ AFMT_DCN3_REG_FIELD_LIST(uint32_t);
+};
+
+
+/**
+* speakers_to_channels
+*
+* @brief
+* translate speakers to channels
+*
+* FL - Front Left
+* FR - Front Right
+* RL - Rear Left
+* RR - Rear Right
+* RC - Rear Center
+* FC - Front Center
+* FLC - Front Left Center
+* FRC - Front Right Center
+* RLC - Rear Left Center
+* RRC - Rear Right Center
+* LFE - Low Freq Effect
+*
+* FC
+* FLC FRC
+* FL FR
+*
+* LFE
+* ()
+*
+*
+* RL RR
+* RLC RRC
+* RC
+*
+* ch 8 7 6 5 4 3 2 1
+* 0b00000011 - - - - - - FR FL
+* 0b00000111 - - - - - LFE FR FL
+* 0b00001011 - - - - FC - FR FL
+* 0b00001111 - - - - FC LFE FR FL
+* 0b00010011 - - - RC - - FR FL
+* 0b00010111 - - - RC - LFE FR FL
+* 0b00011011 - - - RC FC - FR FL
+* 0b00011111 - - - RC FC LFE FR FL
+* 0b00110011 - - RR RL - - FR FL
+* 0b00110111 - - RR RL - LFE FR FL
+* 0b00111011 - - RR RL FC - FR FL
+* 0b00111111 - - RR RL FC LFE FR FL
+* 0b01110011 - RC RR RL - - FR FL
+* 0b01110111 - RC RR RL - LFE FR FL
+* 0b01111011 - RC RR RL FC - FR FL
+* 0b01111111 - RC RR RL FC LFE FR FL
+* 0b11110011 RRC RLC RR RL - - FR FL
+* 0b11110111 RRC RLC RR RL - LFE FR FL
+* 0b11111011 RRC RLC RR RL FC - FR FL
+* 0b11111111 RRC RLC RR RL FC LFE FR FL
+* 0b11000011 FRC FLC - - - - FR FL
+* 0b11000111 FRC FLC - - - LFE FR FL
+* 0b11001011 FRC FLC - - FC - FR FL
+* 0b11001111 FRC FLC - - FC LFE FR FL
+* 0b11010011 FRC FLC - RC - - FR FL
+* 0b11010111 FRC FLC - RC - LFE FR FL
+* 0b11011011 FRC FLC - RC FC - FR FL
+* 0b11011111 FRC FLC - RC FC LFE FR FL
+* 0b11110011 FRC FLC RR RL - - FR FL
+* 0b11110111 FRC FLC RR RL - LFE FR FL
+* 0b11111011 FRC FLC RR RL FC - FR FL
+* 0b11111111 FRC FLC RR RL FC LFE FR FL
+*
+* @param
+* speakers - speaker information as it comes from CEA audio block
+*/
+/* translate speakers to channels */
+
+union audio_cea_channels {
+ uint8_t all;
+ struct audio_cea_channels_bits {
+ uint32_t FL:1;
+ uint32_t FR:1;
+ uint32_t LFE:1;
+ uint32_t FC:1;
+ uint32_t RL_RC:1;
+ uint32_t RR:1;
+ uint32_t RC_RLC_FLC:1;
+ uint32_t RRC_FRC:1;
+ } channels;
+};
+
+struct afmt;
+
+struct afmt_funcs {
+
+ void (*setup_hdmi_audio)(
+ struct afmt *afmt);
+
+ void (*se_audio_setup)(
+ struct afmt *afmt,
+ unsigned int az_inst,
+ struct audio_info *audio_info);
+
+ void (*audio_mute_control)(
+ struct afmt *afmt,
+ bool mute);
+
+ void (*audio_info_immediate_update)(
+ struct afmt *afmt);
+
+ void (*setup_dp_audio)(
+ struct afmt *afmt);
+};
+
+struct afmt {
+ const struct afmt_funcs *funcs;
+ struct dc_context *ctx;
+ int inst;
+};
+
+struct dcn30_afmt {
+ struct afmt base;
+ const struct dcn30_afmt_registers *regs;
+ const struct dcn30_afmt_shift *afmt_shift;
+ const struct dcn30_afmt_mask *afmt_mask;
+};
+
+void afmt3_construct(struct dcn30_afmt *afmt3,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn30_afmt_registers *afmt_regs,
+ const struct dcn30_afmt_shift *afmt_shift,
+ const struct dcn30_afmt_mask *afmt_mask);
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
new file mode 100644
index 000000000000..a139a87a1a81
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
@@ -0,0 +1,640 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "core_types.h"
+#include "reg_helper.h"
+#include "dcn30_dpp.h"
+#include "basics/conversion.h"
+#include "dcn30_cm_common.h"
+#include "custom_float.h"
+
+#define REG(reg) reg
+
+#define CTX \
+ ctx //dpp->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ reg->shifts.field_name, reg->masks.field_name
+
+void cm_helper_program_gamcor_xfer_func(
+ struct dc_context *ctx,
+ const struct pwl_params *params,
+ const struct dcn3_xfer_func_reg *reg)
+{
+ uint32_t reg_region_cur;
+ unsigned int i = 0;
+
+ REG_SET_2(reg->start_cntl_b, 0,
+ exp_region_start, params->corner_points[0].blue.custom_float_x,
+ exp_resion_start_segment, 0);
+ REG_SET_2(reg->start_cntl_g, 0,
+ exp_region_start, params->corner_points[0].green.custom_float_x,
+ exp_resion_start_segment, 0);
+ REG_SET_2(reg->start_cntl_r, 0,
+ exp_region_start, params->corner_points[0].red.custom_float_x,
+ exp_resion_start_segment, 0);
+
+ REG_SET(reg->start_slope_cntl_b, 0, //linear slope at start of curve
+ field_region_linear_slope, params->corner_points[0].blue.custom_float_slope);
+ REG_SET(reg->start_slope_cntl_g, 0,
+ field_region_linear_slope, params->corner_points[0].green.custom_float_slope);
+ REG_SET(reg->start_slope_cntl_r, 0,
+ field_region_linear_slope, params->corner_points[0].red.custom_float_slope);
+
+ REG_SET(reg->start_end_cntl1_b, 0,
+ field_region_end_base, params->corner_points[1].blue.custom_float_y);
+ REG_SET(reg->start_end_cntl1_g, 0,
+ field_region_end_base, params->corner_points[1].green.custom_float_y);
+ REG_SET(reg->start_end_cntl1_r, 0,
+ field_region_end_base, params->corner_points[1].red.custom_float_y);
+
+ REG_SET_2(reg->start_end_cntl2_b, 0,
+ field_region_end_slope, params->corner_points[1].blue.custom_float_slope,
+ field_region_end, params->corner_points[1].blue.custom_float_x);
+ REG_SET_2(reg->start_end_cntl2_g, 0,
+ field_region_end_slope, params->corner_points[1].green.custom_float_slope,
+ field_region_end, params->corner_points[1].green.custom_float_x);
+ REG_SET_2(reg->start_end_cntl2_r, 0,
+ field_region_end_slope, params->corner_points[1].red.custom_float_slope,
+ field_region_end, params->corner_points[1].red.custom_float_x);
+
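+	/* Each region register holds two curve regions; walk the registers
+	 * from region_start to region_end, consuming two arr_curve_points
+	 * entries per register.
+	 */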
+ for (reg_region_cur = reg->region_start;
+ reg_region_cur <= reg->region_end;
+ reg_region_cur++) {
+
+ const struct gamma_curve *curve0 = &(params->arr_curve_points[2 * i]);
+ const struct gamma_curve *curve1 = &(params->arr_curve_points[(2 * i) + 1]);
+
+ REG_SET_4(reg_region_cur, 0,
+ exp_region0_lut_offset, curve0->offset,
+ exp_region0_num_segments, curve0->segments_num,
+ exp_region1_lut_offset, curve1->offset,
+ exp_region1_num_segments, curve1->segments_num);
+
+ i++;
+ }
+}
+
+/* The driver uses 32 regions or fewer, but DCN HW has 34; the extra 2 are set to 0 */
+#define MAX_REGIONS_NUMBER 34
+#define MAX_LOW_POINT 25
+#define NUMBER_REGIONS 32
+#define NUMBER_SW_SEGMENTS 16
+
+bool cm3_helper_translate_curve_to_hw_format(
+ const struct dc_transfer_func *output_tf,
+ struct pwl_params *lut_params, bool fixpoint)
+{
+ struct curve_points3 *corner_points;
+ struct pwl_result_data *rgb_resulted;
+ struct pwl_result_data *rgb;
+ struct pwl_result_data *rgb_plus_1;
+ struct fixed31_32 end_value;
+
+ int32_t region_start, region_end;
+ int32_t i;
+ uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points;
+
+ if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
+ return false;
+
+ PERF_TRACE_CTX(output_tf->ctx);
+
+ corner_points = lut_params->corner_points;
+ rgb_resulted = lut_params->rgb_resulted;
+ hw_points = 0;
+
+ memset(lut_params, 0, sizeof(struct pwl_params));
+ memset(seg_distr, 0, sizeof(seg_distr));
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ || output_tf->tf == TRANSFER_FUNCTION_GAMMA22 ||
+ output_tf->tf == TRANSFER_FUNCTION_HLG) {
+ /* 32 segments
+ * segments are from 2^-25 to 2^7
+ */
+ for (i = 0; i < NUMBER_REGIONS ; i++)
+ seg_distr[i] = 3;
+
+ region_start = -MAX_LOW_POINT;
+ region_end = NUMBER_REGIONS - MAX_LOW_POINT;
+ } else {
+		/* 10 segments
+		 * segments are from 2^-10 to 2^0
+		 * There are fewer than 256 points, for optimization
+		 */
+ seg_distr[0] = 3;
+ seg_distr[1] = 4;
+ seg_distr[2] = 4;
+ seg_distr[3] = 4;
+ seg_distr[4] = 4;
+ seg_distr[5] = 4;
+ seg_distr[6] = 4;
+ seg_distr[7] = 4;
+ seg_distr[8] = 4;
+ seg_distr[9] = 4;
+
+ region_start = -10;
+ region_end = 0;
+ }
+
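+	/* seg_distr[k] is the log2 of the number of HW points in region k;
+	 * unused regions are marked with -1 and contribute no points.
+	 */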
+ for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
+ seg_distr[i] = -1;
+
+ for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
+ if (seg_distr[k] != -1)
+ hw_points += (1 << seg_distr[k]);
+ }
+
+ j = 0;
+ for (k = 0; k < (region_end - region_start); k++) {
+ increment = NUMBER_SW_SEGMENTS / (1 << seg_distr[k]);
+ start_index = (region_start + k + MAX_LOW_POINT) *
+ NUMBER_SW_SEGMENTS;
+ for (i = start_index; i < start_index + NUMBER_SW_SEGMENTS;
+ i += increment) {
+ if (j == hw_points - 1)
+ break;
+ rgb_resulted[j].red = output_tf->tf_pts.red[i];
+ rgb_resulted[j].green = output_tf->tf_pts.green[i];
+ rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
+ j++;
+ }
+ }
+
+ /* last point */
+ start_index = (region_end + MAX_LOW_POINT) * NUMBER_SW_SEGMENTS;
+ rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index];
+ rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
+ rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
+
+ // All 3 color channels have same x
+ corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
+ dc_fixpt_from_int(region_start));
+ corner_points[0].green.x = corner_points[0].red.x;
+ corner_points[0].blue.x = corner_points[0].red.x;
+
+ corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
+ dc_fixpt_from_int(region_end));
+ corner_points[1].green.x = corner_points[1].red.x;
+ corner_points[1].blue.x = corner_points[1].red.x;
+
+ corner_points[0].red.y = rgb_resulted[0].red;
+ corner_points[0].green.y = rgb_resulted[0].green;
+ corner_points[0].blue.y = rgb_resulted[0].blue;
+
+ corner_points[0].red.slope = dc_fixpt_div(corner_points[0].red.y,
+ corner_points[0].red.x);
+ corner_points[0].green.slope = dc_fixpt_div(corner_points[0].green.y,
+ corner_points[0].green.x);
+ corner_points[0].blue.slope = dc_fixpt_div(corner_points[0].blue.y,
+ corner_points[0].blue.x);
+
+	/* see comment above: m_arrPoints[1].y should be the Y value for the
+	 * region end (m_numOfHwPoints), not the last HW point (m_numOfHwPoints - 1)
+	 */
+ corner_points[1].red.y = rgb_resulted[hw_points - 1].red;
+ corner_points[1].green.y = rgb_resulted[hw_points - 1].green;
+ corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue;
+ corner_points[1].red.slope = dc_fixpt_zero;
+ corner_points[1].green.slope = dc_fixpt_zero;
+ corner_points[1].blue.slope = dc_fixpt_zero;
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ || output_tf->tf == TRANSFER_FUNCTION_HLG) {
+		/* For PQ/HLG we want a straight line from the last HW X point,
+		 * with the slope such that we hit 1.0 at 10000 nits (PQ) or
+		 * 1000 nits (HLG).
+		 */
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ)
+ end_value = dc_fixpt_from_int(125);
+ else
+ end_value = dc_fixpt_from_fraction(125, 10);
+
+ corner_points[1].red.slope = dc_fixpt_div(
+ dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y),
+ dc_fixpt_sub(end_value, corner_points[1].red.x));
+ corner_points[1].green.slope = dc_fixpt_div(
+ dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y),
+ dc_fixpt_sub(end_value, corner_points[1].green.x));
+ corner_points[1].blue.slope = dc_fixpt_div(
+ dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y),
+ dc_fixpt_sub(end_value, corner_points[1].blue.x));
+ }
+ lut_params->hw_points_num = hw_points;
+
+ k = 0;
+ for (i = 1; i < MAX_REGIONS_NUMBER; i++) {
+ if (seg_distr[k] != -1) {
+ lut_params->arr_curve_points[k].segments_num =
+ seg_distr[k];
+ lut_params->arr_curve_points[i].offset =
+ lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
+ }
+ k++;
+ }
+
+ if (seg_distr[k] != -1)
+ lut_params->arr_curve_points[k].segments_num = seg_distr[k];
+
+ rgb = rgb_resulted;
+ rgb_plus_1 = rgb_resulted + 1;
+
+ i = 1;
+ while (i != hw_points + 1) {
+ if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
+ rgb_plus_1->red = rgb->red;
+ if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
+ rgb_plus_1->green = rgb->green;
+ if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
+ rgb_plus_1->blue = rgb->blue;
+
+ rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
+ rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
+ rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
+
+ if (fixpoint == true) {
+ rgb->delta_red_reg = dc_fixpt_clamp_u0d10(rgb->delta_red);
+ rgb->delta_green_reg = dc_fixpt_clamp_u0d10(rgb->delta_green);
+ rgb->delta_blue_reg = dc_fixpt_clamp_u0d10(rgb->delta_blue);
+ rgb->red_reg = dc_fixpt_clamp_u0d14(rgb->red);
+ rgb->green_reg = dc_fixpt_clamp_u0d14(rgb->green);
+ rgb->blue_reg = dc_fixpt_clamp_u0d14(rgb->blue);
+ }
+
+ ++rgb_plus_1;
+ ++rgb;
+ ++i;
+ }
+ cm3_helper_convert_to_custom_float(rgb_resulted,
+ lut_params->corner_points,
+ hw_points, fixpoint);
+
+ return true;
+}
+
+#define NUM_DEGAMMA_REGIONS 12
+
+
+bool cm3_helper_translate_curve_to_degamma_hw_format(
+ const struct dc_transfer_func *output_tf,
+ struct pwl_params *lut_params)
+{
+ struct curve_points3 *corner_points;
+ struct pwl_result_data *rgb_resulted;
+ struct pwl_result_data *rgb;
+ struct pwl_result_data *rgb_plus_1;
+
+ int32_t region_start, region_end;
+ int32_t i;
+ uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points;
+
+ if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
+ return false;
+
+ PERF_TRACE_CTX(output_tf->ctx);
+
+ corner_points = lut_params->corner_points;
+ rgb_resulted = lut_params->rgb_resulted;
+ hw_points = 0;
+
+ memset(lut_params, 0, sizeof(struct pwl_params));
+ memset(seg_distr, 0, sizeof(seg_distr));
+
+ region_start = -NUM_DEGAMMA_REGIONS;
+ region_end = 0;
+
+
+ for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
+ seg_distr[i] = -1;
+	/* 12 segments
+	 * segments are from 2^-12 to 2^0
+	 */
+ for (i = 0; i < NUM_DEGAMMA_REGIONS ; i++)
+ seg_distr[i] = 4;
+
+ for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
+ if (seg_distr[k] != -1)
+ hw_points += (1 << seg_distr[k]);
+ }
+
+ j = 0;
+ for (k = 0; k < (region_end - region_start); k++) {
+ increment = NUMBER_SW_SEGMENTS / (1 << seg_distr[k]);
+ start_index = (region_start + k + MAX_LOW_POINT) *
+ NUMBER_SW_SEGMENTS;
+ for (i = start_index; i < start_index + NUMBER_SW_SEGMENTS;
+ i += increment) {
+ if (j == hw_points - 1)
+ break;
+ rgb_resulted[j].red = output_tf->tf_pts.red[i];
+ rgb_resulted[j].green = output_tf->tf_pts.green[i];
+ rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
+ j++;
+ }
+ }
+
+ /* last point */
+ start_index = (region_end + MAX_LOW_POINT) * NUMBER_SW_SEGMENTS;
+ rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index];
+ rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
+ rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
+
+ corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
+ dc_fixpt_from_int(region_start));
+ corner_points[0].green.x = corner_points[0].red.x;
+ corner_points[0].blue.x = corner_points[0].red.x;
+ corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
+ dc_fixpt_from_int(region_end));
+ corner_points[1].green.x = corner_points[1].red.x;
+ corner_points[1].blue.x = corner_points[1].red.x;
+
+ corner_points[0].red.y = rgb_resulted[0].red;
+ corner_points[0].green.y = rgb_resulted[0].green;
+ corner_points[0].blue.y = rgb_resulted[0].blue;
+
+	/* see comment above: m_arrPoints[1].y should be the Y value for the
+	 * region end (m_numOfHwPoints), not the last HW point (m_numOfHwPoints - 1)
+	 */
+ corner_points[1].red.y = rgb_resulted[hw_points - 1].red;
+ corner_points[1].green.y = rgb_resulted[hw_points - 1].green;
+ corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue;
+ corner_points[1].red.slope = dc_fixpt_zero;
+ corner_points[1].green.slope = dc_fixpt_zero;
+ corner_points[1].blue.slope = dc_fixpt_zero;
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+		/* For PQ we want a straight line from the last HW X point,
+		 * with the slope such that we hit 1.0 at 10000 nits.
+		 */
+ const struct fixed31_32 end_value =
+ dc_fixpt_from_int(125);
+
+ corner_points[1].red.slope = dc_fixpt_div(
+ dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y),
+ dc_fixpt_sub(end_value, corner_points[1].red.x));
+ corner_points[1].green.slope = dc_fixpt_div(
+ dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y),
+ dc_fixpt_sub(end_value, corner_points[1].green.x));
+ corner_points[1].blue.slope = dc_fixpt_div(
+ dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y),
+ dc_fixpt_sub(end_value, corner_points[1].blue.x));
+ }
+
+ lut_params->hw_points_num = hw_points;
+
+ k = 0;
+ for (i = 1; i < MAX_REGIONS_NUMBER; i++) {
+ if (seg_distr[k] != -1) {
+ lut_params->arr_curve_points[k].segments_num =
+ seg_distr[k];
+ lut_params->arr_curve_points[i].offset =
+ lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
+ }
+ k++;
+ }
+
+ if (seg_distr[k] != -1)
+ lut_params->arr_curve_points[k].segments_num = seg_distr[k];
+
+ rgb = rgb_resulted;
+ rgb_plus_1 = rgb_resulted + 1;
+
+ i = 1;
+ while (i != hw_points + 1) {
+ if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
+ rgb_plus_1->red = rgb->red;
+ if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
+ rgb_plus_1->green = rgb->green;
+ if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
+ rgb_plus_1->blue = rgb->blue;
+
+ rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
+ rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
+ rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
+
+ ++rgb_plus_1;
+ ++rgb;
+ ++i;
+ }
+ cm3_helper_convert_to_custom_float(rgb_resulted,
+ lut_params->corner_points,
+ hw_points, false);
+
+ return true;
+}
+
+bool cm3_helper_convert_to_custom_float(
+ struct pwl_result_data *rgb_resulted,
+ struct curve_points3 *corner_points,
+ uint32_t hw_points_num,
+ bool fixpoint)
+{
+ struct custom_float_format fmt;
+
+ struct pwl_result_data *rgb = rgb_resulted;
+
+ uint32_t i = 0;
+
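+	/* Corner-point start values and slopes use an unsigned custom float
+	 * with a 6-bit exponent and 12-bit mantissa; the end-point X and slope
+	 * switch to a 10-bit mantissa further below.
+	 */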
+ fmt.exponenta_bits = 6;
+ fmt.mantissa_bits = 12;
+ fmt.sign = false;
+
+ /* corner_points[0] - beginning base, slope offset for R,G,B
+ * corner_points[1] - end base, slope offset for R,G,B
+ */
+ if (!convert_to_custom_float_format(corner_points[0].red.x, &fmt,
+ &corner_points[0].red.custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[0].green.x, &fmt,
+ &corner_points[0].green.custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[0].blue.x, &fmt,
+ &corner_points[0].blue.custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(corner_points[0].red.offset, &fmt,
+ &corner_points[0].red.custom_float_offset)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[0].green.offset, &fmt,
+ &corner_points[0].green.custom_float_offset)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[0].blue.offset, &fmt,
+ &corner_points[0].blue.custom_float_offset)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(corner_points[0].red.slope, &fmt,
+ &corner_points[0].red.custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[0].green.slope, &fmt,
+ &corner_points[0].green.custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[0].blue.slope, &fmt,
+ &corner_points[0].blue.custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (fixpoint == true) {
+ corner_points[1].red.custom_float_y =
+ dc_fixpt_clamp_u0d14(corner_points[1].red.y);
+ corner_points[1].green.custom_float_y =
+ dc_fixpt_clamp_u0d14(corner_points[1].green.y);
+ corner_points[1].blue.custom_float_y =
+ dc_fixpt_clamp_u0d14(corner_points[1].blue.y);
+ } else {
+ if (!convert_to_custom_float_format(corner_points[1].red.y,
+ &fmt, &corner_points[1].red.custom_float_y)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[1].green.y,
+ &fmt, &corner_points[1].green.custom_float_y)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[1].blue.y,
+ &fmt, &corner_points[1].blue.custom_float_y)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ }
+
+ fmt.mantissa_bits = 10;
+ fmt.sign = false;
+
+ if (!convert_to_custom_float_format(corner_points[1].red.x, &fmt,
+ &corner_points[1].red.custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[1].green.x, &fmt,
+ &corner_points[1].green.custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[1].blue.x, &fmt,
+ &corner_points[1].blue.custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(corner_points[1].red.slope, &fmt,
+ &corner_points[1].red.custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[1].green.slope, &fmt,
+ &corner_points[1].green.custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[1].blue.slope, &fmt,
+ &corner_points[1].blue.custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (hw_points_num == 0 || rgb_resulted == NULL || fixpoint == true)
+ return true;
+
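+	/* LUT points and their deltas are converted with the 12-bit mantissa
+	 * format again.
+	 */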
+ fmt.mantissa_bits = 12;
+
+ while (i != hw_points_num) {
+ if (!convert_to_custom_float_format(rgb->red, &fmt,
+ &rgb->red_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->green, &fmt,
+ &rgb->green_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->blue, &fmt,
+ &rgb->blue_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->delta_red, &fmt,
+ &rgb->delta_red_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->delta_green, &fmt,
+ &rgb->delta_green_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->delta_blue, &fmt,
+ &rgb->delta_blue_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ++rgb;
+ ++i;
+ }
+
+ return true;
+}
+
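+/* Returns true when every LUT entry programs identical R, G and B register
+ * values.
+ */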
+bool is_rgb_equal(const struct pwl_result_data *rgb, uint32_t num)
+{
+ uint32_t i;
+ bool ret = true;
+
+ for (i = 0 ; i < num; i++) {
+ if (rgb[i].red_reg != rgb[i].green_reg ||
+ rgb[i].blue_reg != rgb[i].red_reg ||
+ rgb[i].blue_reg != rgb[i].green_reg) {
+ ret = false;
+ break;
+ }
+ }
+ return ret;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.h
new file mode 100644
index 000000000000..bd98b327a6c7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn10/dcn10_cm_common.h"
+
+#ifndef __DAL_DCN30_CM_COMMON_H__
+#define __DAL_DCN30_CM_COMMON_H__
+
+#define TF_HELPER_REG_FIELD_LIST_DCN3(type) \
+ TF_HELPER_REG_FIELD_LIST(type);\
+ type field_region_start_base;\
+ type field_offset
+
+struct DCN3_xfer_func_shift {
+ TF_HELPER_REG_FIELD_LIST_DCN3(uint8_t);
+};
+
+struct DCN3_xfer_func_mask {
+ TF_HELPER_REG_FIELD_LIST_DCN3(uint32_t);
+};
+
+struct dcn3_xfer_func_reg {
+ struct DCN3_xfer_func_shift shifts;
+ struct DCN3_xfer_func_mask masks;
+
+ TF_HELPER_REG_LIST;
+ uint32_t offset_b;
+ uint32_t offset_g;
+ uint32_t offset_r;
+ uint32_t start_base_cntl_b;
+ uint32_t start_base_cntl_g;
+ uint32_t start_base_cntl_r;
+};
+
+void cm_helper_program_gamcor_xfer_func(
+ struct dc_context *ctx,
+ const struct pwl_params *params,
+ const struct dcn3_xfer_func_reg *reg);
+
+bool cm3_helper_translate_curve_to_hw_format(
+ const struct dc_transfer_func *output_tf,
+ struct pwl_params *lut_params, bool fixpoint);
+
+bool cm3_helper_translate_curve_to_degamma_hw_format(
+ const struct dc_transfer_func *output_tf,
+ struct pwl_params *lut_params);
+
+bool cm3_helper_convert_to_custom_float(
+ struct pwl_result_data *rgb_resulted,
+ struct curve_points3 *corner_points,
+ uint32_t hw_points_num,
+ bool fixpoint);
+
+bool is_rgb_equal(const struct pwl_result_data *rgb, uint32_t num);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.c
new file mode 100644
index 000000000000..b822a13e40ce
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "core_types.h"
+#include "dcn30_dccg.h"
+
+#define TO_DCN_DCCG(dccg)\
+ container_of(dccg, struct dcn_dccg, base)
+
+#define REG(reg) \
+ (dccg_dcn->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dccg_dcn->dccg_shift->field_name, dccg_dcn->dccg_mask->field_name
+
+#define CTX \
+ dccg_dcn->base.ctx
+#define DC_LOGGER \
+ dccg->ctx->logger
+
+
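+/* DCN3.0 DCCG reuses the DCN2.0 DPP DTO update, reference clock query and
+ * init implementations.
+ */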
+static const struct dccg_funcs dccg3_funcs = {
+ .update_dpp_dto = dccg2_update_dpp_dto,
+ .get_dccg_ref_freq = dccg2_get_dccg_ref_freq,
+ .dccg_init = dccg2_init
+};
+
+struct dccg *dccg3_create(
+ struct dc_context *ctx,
+ const struct dccg_registers *regs,
+ const struct dccg_shift *dccg_shift,
+ const struct dccg_mask *dccg_mask)
+{
+ struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
+ struct dccg *base;
+
+ if (dccg_dcn == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ base = &dccg_dcn->base;
+ base->ctx = ctx;
+ base->funcs = &dccg3_funcs;
+
+ dccg_dcn->regs = regs;
+ dccg_dcn->dccg_shift = dccg_shift;
+ dccg_dcn->dccg_mask = dccg_mask;
+
+ return &dccg_dcn->base;
+}
+
+struct dccg *dccg30_create(
+ struct dc_context *ctx,
+ const struct dccg_registers *regs,
+ const struct dccg_shift *dccg_shift,
+ const struct dccg_mask *dccg_mask)
+{
+ struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
+ struct dccg *base;
+
+ if (dccg_dcn == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ base = &dccg_dcn->base;
+ base->ctx = ctx;
+ base->funcs = &dccg3_funcs;
+
+ dccg_dcn->regs = regs;
+ dccg_dcn->dccg_shift = dccg_shift;
+ dccg_dcn->dccg_mask = dccg_mask;
+
+ return &dccg_dcn->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h
new file mode 100644
index 000000000000..029dda13a464
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN30_DCCG_H__
+#define __DCN30_DCCG_H__
+
+#include "dcn20/dcn20_dccg.h"
+
+
+#define DCCG_REG_LIST_DCN3AG() \
+ DCCG_COMMON_REG_LIST_DCN_BASE(),\
+ SR(PHYASYMCLK_CLOCK_CNTL),\
+ SR(PHYBSYMCLK_CLOCK_CNTL),\
+ SR(PHYCSYMCLK_CLOCK_CNTL)
+
+
+#define DCCG_REG_LIST_DCN30() \
+ DCCG_REG_LIST_DCN2(),\
+ SR(PHYASYMCLK_CLOCK_CNTL),\
+ SR(PHYBSYMCLK_CLOCK_CNTL),\
+ SR(PHYCSYMCLK_CLOCK_CNTL)
+
+#define DCCG_MASK_SH_LIST_DCN3(mask_sh) \
+ DCCG_MASK_SH_LIST_DCN2(mask_sh),\
+ DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\
+ DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_SRC_SEL, mask_sh),\
+ DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_SRC_SEL, mask_sh)
+
+struct dccg *dccg3_create(
+ struct dc_context *ctx,
+ const struct dccg_registers *regs,
+ const struct dccg_shift *dccg_shift,
+ const struct dccg_mask *dccg_mask);
+
+struct dccg *dccg30_create(
+ struct dc_context *ctx,
+ const struct dccg_registers *regs,
+ const struct dccg_shift *dccg_shift,
+ const struct dccg_mask *dccg_mask);
+
+#endif //__DCN30_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
new file mode 100644
index 000000000000..c29326e9856a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+
+#include "core_types.h"
+#include "link_encoder.h"
+#include "dcn30_dio_link_encoder.h"
+#include "stream_encoder.h"
+#include "i2caux_interface.h"
+#include "dc_bios_types.h"
+/* #include "dcn3ag/dcn3ag_phy_fw.h" */
+
+#include "gpio_service_interface.h"
+
+#define CTX \
+ enc10->base.ctx
+#define DC_LOGGER \
+ enc10->base.ctx->logger
+
+#define REG(reg)\
+ (enc10->link_regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ enc10->link_shift->field_name, enc10->link_mask->field_name
+
+#define IND_REG(index) \
+ (enc10->link_regs->index)
+
+
+static bool dcn30_link_encoder_validate_output_with_stream(
+ struct link_encoder *enc,
+ const struct dc_stream_state *stream)
+{
+ return dcn10_link_encoder_validate_output_with_stream(enc, stream);
+}
+
+static const struct link_encoder_funcs dcn30_link_enc_funcs = {
+ .read_state = link_enc2_read_state,
+ .validate_output_with_stream =
+ dcn30_link_encoder_validate_output_with_stream,
+ .hw_init = enc2_hw_init,
+ .setup = dcn10_link_encoder_setup,
+ .enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
+ .enable_dp_output = dcn20_link_encoder_enable_dp_output,
+ .enable_dp_mst_output = dcn10_link_encoder_enable_dp_mst_output,
+ .disable_output = dcn10_link_encoder_disable_output,
+ .dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
+ .dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
+ .update_mst_stream_allocation_table =
+ dcn10_link_encoder_update_mst_stream_allocation_table,
+ .psr_program_dp_dphy_fast_training =
+ dcn10_psr_program_dp_dphy_fast_training,
+ .psr_program_secondary_packet = dcn10_psr_program_secondary_packet,
+ .connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe,
+ .enable_hpd = dcn10_link_encoder_enable_hpd,
+ .disable_hpd = dcn10_link_encoder_disable_hpd,
+ .is_dig_enabled = dcn10_is_dig_enabled,
+ .destroy = dcn10_link_encoder_destroy,
+ .fec_set_enable = enc2_fec_set_enable,
+ .fec_set_ready = enc2_fec_set_ready,
+ .fec_is_active = enc2_fec_is_active,
+ .get_dig_frontend = dcn10_get_dig_frontend,
+ .get_dig_mode = dcn10_get_dig_mode,
+ .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
+ .get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
+};
+
+void dcn30_link_encoder_construct(
+ struct dcn20_link_encoder *enc20,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dcn10_link_enc_registers *link_regs,
+ const struct dcn10_link_enc_aux_registers *aux_regs,
+ const struct dcn10_link_enc_hpd_registers *hpd_regs,
+ const struct dcn10_link_enc_shift *link_shift,
+ const struct dcn10_link_enc_mask *link_mask)
+{
+ struct bp_encoder_cap_info bp_cap_info = {0};
+ const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
+ enum bp_result result = BP_RESULT_OK;
+ struct dcn10_link_encoder *enc10 = &enc20->enc10;
+
+ enc10->base.funcs = &dcn30_link_enc_funcs;
+ enc10->base.ctx = init_data->ctx;
+ enc10->base.id = init_data->encoder;
+
+ enc10->base.hpd_source = init_data->hpd_source;
+ enc10->base.connector = init_data->connector;
+
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+
+ enc10->base.features = *enc_features;
+
+ enc10->base.transmitter = init_data->transmitter;
+
+	/* set the flag to indicate whether the driver polls the I2C data pin
+	 * while doing DP sink detection
+	 */
+
+/* if (dal_adapter_service_is_feature_supported(as,
+ FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
+ enc10->base.features.flags.bits.
+ DP_SINK_DETECT_POLL_DATA_PIN = true;*/
+
+ enc10->base.output_signals =
+ SIGNAL_TYPE_DVI_SINGLE_LINK |
+ SIGNAL_TYPE_DVI_DUAL_LINK |
+ SIGNAL_TYPE_LVDS |
+ SIGNAL_TYPE_DISPLAY_PORT |
+ SIGNAL_TYPE_DISPLAY_PORT_MST |
+ SIGNAL_TYPE_EDP |
+ SIGNAL_TYPE_HDMI_TYPE_A;
+
+	/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
+	 * SW always assigns DIG_FE 1:1 mapped to DIG_BE for non-MST UNIPHY.
+	 * SW assigns DIG_FE to non-MST UNIPHY first and MST last, so the
+	 * preferred DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI
+	 * and LVDS. The preferred DIG assignment is decided by board design.
+	 * For DCE 8.0 there are at most 6 UNIPHYs; we assume the board design
+	 * and VBIOS will filter out the 7th UNIPHY for DCE 8.0.
+	 * Given this, adding DIGG should not hurt DCE 8.0.
+	 * This lets DCE 8.1 share DCE 8.0 code as much as possible.
+	 */
+
+ enc10->link_regs = link_regs;
+ enc10->aux_regs = aux_regs;
+ enc10->hpd_regs = hpd_regs;
+ enc10->link_shift = link_shift;
+ enc10->link_mask = link_mask;
+
+ switch (enc10->base.transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ enc10->base.preferred_engine = ENGINE_ID_DIGA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ enc10->base.preferred_engine = ENGINE_ID_DIGB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ enc10->base.preferred_engine = ENGINE_ID_DIGC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ enc10->base.preferred_engine = ENGINE_ID_DIGD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ enc10->base.preferred_engine = ENGINE_ID_DIGE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ enc10->base.preferred_engine = ENGINE_ID_DIGF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ enc10->base.preferred_engine = ENGINE_ID_DIGG;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ }
+
+ /* default to one to mirror Windows behavior */
+ enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
+
+ result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios,
+ enc10->base.id, &bp_cap_info);
+
+ /* Override features with DCE-specific values */
+ if (result == BP_RESULT_OK) {
+ enc10->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+ enc10->base.features.flags.bits.DP_IS_USB_C =
+ bp_cap_info.DP_IS_USB_C;
+ } else {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
+ }
+ if (enc10->base.ctx->dc->debug.hdmi20_disable) {
+ enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h
new file mode 100644
index 000000000000..585d1ce63db1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_ENCODER__DCN30_H__
+#define __DC_LINK_ENCODER__DCN30_H__
+
+#include "dcn20/dcn20_link_encoder.h"
+
+#define LE_DCN3_REG_LIST(id)\
+ SRI(DIG_BE_CNTL, DIG, id), \
+ SRI(DIG_BE_EN_CNTL, DIG, id), \
+ SRI(TMDS_CTL_BITS, DIG, id), \
+ SRI(TMDS_DCBALANCER_CONTROL, DIG, id), \
+ SRI(DP_CONFIG, DP, id), \
+ SRI(DP_DPHY_CNTL, DP, id), \
+ SRI(DP_DPHY_PRBS_CNTL, DP, id), \
+ SRI(DP_DPHY_SCRAM_CNTL, DP, id),\
+ SRI(DP_DPHY_SYM0, DP, id), \
+ SRI(DP_DPHY_SYM1, DP, id), \
+ SRI(DP_DPHY_SYM2, DP, id), \
+ SRI(DP_DPHY_TRAINING_PATTERN_SEL, DP, id), \
+ SRI(DP_LINK_CNTL, DP, id), \
+ SRI(DP_LINK_FRAMING_CNTL, DP, id), \
+ SRI(DP_MSE_SAT0, DP, id), \
+ SRI(DP_MSE_SAT1, DP, id), \
+ SRI(DP_MSE_SAT2, DP, id), \
+ SRI(DP_MSE_SAT_UPDATE, DP, id), \
+ SRI(DP_SEC_CNTL, DP, id), \
+ SRI(DP_VID_STREAM_CNTL, DP, id), \
+ SRI(DP_DPHY_FAST_TRAINING, DP, id), \
+ SRI(DP_SEC_CNTL1, DP, id), \
+ SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
+ SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id)
+
+#define LINK_ENCODER_MASK_SH_LIST_DCN30(mask_sh) \
+ LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh)
+
+#define DPCS_DCN3_MASK_SH_LIST(mask_sh)\
+ DPCS_DCN2_MASK_SH_LIST(mask_sh),\
+ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT_18_BIT, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_TX_VBOOST_LVL, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_TX_CLK_EN, mask_sh)
+
+void dcn30_link_encoder_construct(
+ struct dcn20_link_encoder *enc20,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dcn10_link_enc_registers *link_regs,
+ const struct dcn10_link_enc_aux_registers *aux_regs,
+ const struct dcn10_link_enc_hpd_registers *hpd_regs,
+ const struct dcn10_link_enc_shift *link_shift,
+ const struct dcn10_link_enc_mask *link_mask);
+
+#endif /* __DC_LINK_ENCODER__DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
new file mode 100644
index 000000000000..f5e80a0db72b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
@@ -0,0 +1,851 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dc_bios_types.h"
+#include "dcn30_dio_stream_encoder.h"
+#include "reg_helper.h"
+#include "hw_shared.h"
+#include "core_types.h"
+#include <linux/delay.h>
+
+
+#define DC_LOGGER \
+ enc1->base.ctx->logger
+
+
+#define REG(reg)\
+ (enc1->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ enc1->se_shift->field_name, enc1->se_mask->field_name
+
+#define VBI_LINE_0 0
+#define HDMI_CLOCK_CHANNEL_RATE_MORE_340M 340000
+
+#define CTX \
+ enc1->base.ctx
+
+
+void convert_dc_info_packet_to_128(
+ const struct dc_info_packet *info_packet,
+ struct dc_info_packet_128 *info_packet_128)
+{
+ unsigned int i;
+
+ info_packet_128->hb0 = info_packet->hb0;
+ info_packet_128->hb1 = info_packet->hb1;
+ info_packet_128->hb2 = info_packet->hb2;
+ info_packet_128->hb3 = info_packet->hb3;
+
+ for (i = 0; i < 32; i++)
+  info_packet_128->sb[i] = info_packet->sb[i];
+}
+
+static void enc3_update_hdmi_info_packet(
+ struct dcn10_stream_encoder *enc1,
+ uint32_t packet_index,
+ const struct dc_info_packet *info_packet)
+{
+ uint32_t cont, send, line;
+
+ if (info_packet->valid) {
+ enc1->base.vpg->funcs->update_generic_info_packet(
+ enc1->base.vpg,
+ packet_index,
+ info_packet);
+
+ /* enable transmission of packet(s) -
+ * packet transmission begins on the next frame */
+ cont = 1;
+ /* send packet(s) every frame */
+ send = 1;
+ /* select line number to send packets on */
+ line = 2;
+ } else {
+ cont = 0;
+ send = 0;
+ line = 0;
+ }
+
+ /* DP_SEC_GSP[x]_LINE_REFERENCE - keep default value REFER_TO_DP_SOF */
+
+ /* choose which generic packet control to use */
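+ /* Generic packets 0-7 have their CONT/SEND bits in
+ * HDMI_GENERIC_PACKET_CONTROL0 and their line numbers packed two per
+ * register in CONTROL1-CONTROL4; packets 8-14 use CONTROL6 for
+ * CONT/SEND and CONTROL7-CONTROL10 for line numbers.
+ */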
+ switch (packet_index) {
+ case 0:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC0_CONT, cont,
+ HDMI_GENERIC0_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL1,
+ HDMI_GENERIC0_LINE, line);
+ break;
+ case 1:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC1_CONT, cont,
+ HDMI_GENERIC1_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL1,
+ HDMI_GENERIC1_LINE, line);
+ break;
+ case 2:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC2_CONT, cont,
+ HDMI_GENERIC2_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL2,
+ HDMI_GENERIC2_LINE, line);
+ break;
+ case 3:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC3_CONT, cont,
+ HDMI_GENERIC3_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL2,
+ HDMI_GENERIC3_LINE, line);
+ break;
+ case 4:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC4_CONT, cont,
+ HDMI_GENERIC4_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL3,
+ HDMI_GENERIC4_LINE, line);
+ break;
+ case 5:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC5_CONT, cont,
+ HDMI_GENERIC5_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL3,
+ HDMI_GENERIC5_LINE, line);
+ break;
+ case 6:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC6_CONT, cont,
+ HDMI_GENERIC6_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL4,
+ HDMI_GENERIC6_LINE, line);
+ break;
+ case 7:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC7_CONT, cont,
+ HDMI_GENERIC7_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL4,
+ HDMI_GENERIC7_LINE, line);
+ break;
+ case 8:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL6,
+ HDMI_GENERIC8_CONT, cont,
+ HDMI_GENERIC8_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL7,
+ HDMI_GENERIC8_LINE, line);
+ break;
+ case 9:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL6,
+ HDMI_GENERIC9_CONT, cont,
+ HDMI_GENERIC9_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL7,
+ HDMI_GENERIC9_LINE, line);
+ break;
+ case 10:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL6,
+ HDMI_GENERIC10_CONT, cont,
+ HDMI_GENERIC10_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL8,
+ HDMI_GENERIC10_LINE, line);
+ break;
+ case 11:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL6,
+ HDMI_GENERIC11_CONT, cont,
+ HDMI_GENERIC11_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL8,
+ HDMI_GENERIC11_LINE, line);
+ break;
+ case 12:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL6,
+ HDMI_GENERIC12_CONT, cont,
+ HDMI_GENERIC12_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL9,
+ HDMI_GENERIC12_LINE, line);
+ break;
+ case 13:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL6,
+ HDMI_GENERIC13_CONT, cont,
+ HDMI_GENERIC13_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL9,
+ HDMI_GENERIC13_LINE, line);
+ break;
+ case 14:
+ REG_UPDATE_2(HDMI_GENERIC_PACKET_CONTROL6,
+ HDMI_GENERIC14_CONT, cont,
+ HDMI_GENERIC14_SEND, send);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL10,
+ HDMI_GENERIC14_LINE, line);
+ break;
+ default:
+ /* invalid HW packet index */
+ DC_LOG_WARNING(
+ "Invalid HW packet index: %s()\n",
+ __func__);
+ return;
+ }
+}
+
+static void enc3_stream_encoder_update_hdmi_info_packets(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ /* TODO: for bring up, disable double buffering */
+ REG_UPDATE(HDMI_DB_CONTROL, HDMI_DB_DISABLE, 1);
+ REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
+
+ /* Always add mandatory packets first, followed by optional ones */
+ enc3_update_hdmi_info_packet(enc1, 0, &info_frame->avi);
+ enc3_update_hdmi_info_packet(enc1, 5, &info_frame->hfvsif);
+ enc3_update_hdmi_info_packet(enc1, 2, &info_frame->gamut);
+ enc3_update_hdmi_info_packet(enc1, 1, &info_frame->vendor);
+ enc3_update_hdmi_info_packet(enc1, 3, &info_frame->spd);
+ enc3_update_hdmi_info_packet(enc1, 4, &info_frame->hdrsmd);
+}
+
+static void enc3_stream_encoder_stop_hdmi_info_packets(
+ struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ /* stop generic packets 0,1 on HDMI */
+ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0,
+ HDMI_GENERIC0_CONT, 0,
+ HDMI_GENERIC0_SEND, 0,
+ HDMI_GENERIC1_CONT, 0,
+ HDMI_GENERIC1_SEND, 0);
+ REG_SET_2(HDMI_GENERIC_PACKET_CONTROL1, 0,
+ HDMI_GENERIC0_LINE, 0,
+ HDMI_GENERIC1_LINE, 0);
+
+ /* stop generic packets 2,3 on HDMI */
+ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0,
+ HDMI_GENERIC2_CONT, 0,
+ HDMI_GENERIC2_SEND, 0,
+ HDMI_GENERIC3_CONT, 0,
+ HDMI_GENERIC3_SEND, 0);
+ REG_SET_2(HDMI_GENERIC_PACKET_CONTROL2, 0,
+ HDMI_GENERIC2_LINE, 0,
+ HDMI_GENERIC3_LINE, 0);
+
+ /* stop generic packets 4,5 on HDMI */
+ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0,
+ HDMI_GENERIC4_CONT, 0,
+ HDMI_GENERIC4_SEND, 0,
+ HDMI_GENERIC5_CONT, 0,
+ HDMI_GENERIC5_SEND, 0);
+ REG_SET_2(HDMI_GENERIC_PACKET_CONTROL3, 0,
+ HDMI_GENERIC4_LINE, 0,
+ HDMI_GENERIC5_LINE, 0);
+
+ /* stop generic packets 6,7 on HDMI */
+ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL0, 0,
+ HDMI_GENERIC6_CONT, 0,
+ HDMI_GENERIC6_SEND, 0,
+ HDMI_GENERIC7_CONT, 0,
+ HDMI_GENERIC7_SEND, 0);
+ REG_SET_2(HDMI_GENERIC_PACKET_CONTROL4, 0,
+ HDMI_GENERIC6_LINE, 0,
+ HDMI_GENERIC7_LINE, 0);
+
+ /* stop generic packets 8,9 on HDMI */
+ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL6, 0,
+ HDMI_GENERIC8_CONT, 0,
+ HDMI_GENERIC8_SEND, 0,
+ HDMI_GENERIC9_CONT, 0,
+ HDMI_GENERIC9_SEND, 0);
+ REG_SET_2(HDMI_GENERIC_PACKET_CONTROL7, 0,
+ HDMI_GENERIC8_LINE, 0,
+ HDMI_GENERIC9_LINE, 0);
+
+ /* stop generic packets 10,11 on HDMI */
+ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL6, 0,
+ HDMI_GENERIC10_CONT, 0,
+ HDMI_GENERIC10_SEND, 0,
+ HDMI_GENERIC11_CONT, 0,
+ HDMI_GENERIC11_SEND, 0);
+ REG_SET_2(HDMI_GENERIC_PACKET_CONTROL8, 0,
+ HDMI_GENERIC10_LINE, 0,
+ HDMI_GENERIC11_LINE, 0);
+
+ /* stop generic packets 12,13 on HDMI */
+ REG_SET_4(HDMI_GENERIC_PACKET_CONTROL6, 0,
+ HDMI_GENERIC12_CONT, 0,
+ HDMI_GENERIC12_SEND, 0,
+ HDMI_GENERIC13_CONT, 0,
+ HDMI_GENERIC13_SEND, 0);
+ REG_SET_2(HDMI_GENERIC_PACKET_CONTROL9, 0,
+ HDMI_GENERIC12_LINE, 0,
+ HDMI_GENERIC13_LINE, 0);
+
+ /* stop generic packet 14 on HDMI */
+ REG_SET_2(HDMI_GENERIC_PACKET_CONTROL6, 0,
+ HDMI_GENERIC14_CONT, 0,
+ HDMI_GENERIC14_SEND, 0);
+ REG_UPDATE(HDMI_GENERIC_PACKET_CONTROL10,
+ HDMI_GENERIC14_LINE, 0);
+}
+
+/* Set DSC-related configuration.
+ * dsc_mode: 0 disables DSC, other values enable DSC in specified format
+ * dsc_bytes_per_pixel: Bytes per pixel in u3.28 format
+ * dsc_slice_width: Slice width in pixels
+ */
+static void enc3_dp_set_dsc_config(struct stream_encoder *enc,
+ enum optc_dsc_mode dsc_mode,
+ uint32_t dsc_bytes_per_pixel,
+ uint32_t dsc_slice_width)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ REG_UPDATE_2(DP_DSC_CNTL,
+ DP_DSC_MODE, dsc_mode,
+ DP_DSC_SLICE_WIDTH, dsc_slice_width);
+
+ REG_SET(DP_DSC_BYTES_PER_PIXEL, 0,
+ DP_DSC_BYTES_PER_PIXEL, dsc_bytes_per_pixel);
+}
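+
+/* Illustration of the u3.28 fixed point expected in dsc_bytes_per_pixel:
+ * 3 integer bits and 28 fractional bits, so 2.5 bytes per pixel is
+ * (2 << 28) + (1 << 27) = 0x28000000. A hypothetical helper for callers
+ * starting from DSC bits per pixel in 1/16th units (an assumed input
+ * format, not part of this interface) could be sketched as:
+ *
+ *	static uint32_t dsc_bpp_x16_to_u3_28(uint32_t bpp_x16)
+ *	{
+ *		// bytes/pixel = (bpp_x16 / 16) / 8; scale by 2^28 for u3.28
+ *		return (uint32_t)(((uint64_t)bpp_x16 << 28) / (16 * 8));
+ *	}
+ */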
+
+
+static void enc3_dp_set_dsc_pps_info_packet(struct stream_encoder *enc,
+ bool enable,
+ uint8_t *dsc_packed_pps)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ if (enable) {
+ struct dc_info_packet pps_sdp;
+ int i;
+
+ /* Configure for PPS packet size (128 bytes) */
+ REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP11_PPS, 1);
+
+ /* We need to turn on the clock before programming the AFMT block.
+ *
+ * TODO: This may no longer be needed here since update_generic_info_packet
+ * no longer touches AFMT.
+ */
+ REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
+
+ /* Load PPS into infoframe (SDP) registers */
+ pps_sdp.valid = true;
+ pps_sdp.hb0 = 0;
+ pps_sdp.hb1 = DC_DP_INFOFRAME_TYPE_PPS;
+ pps_sdp.hb2 = 127;
+ pps_sdp.hb3 = 0;
+
+ for (i = 0; i < 4; i++) {
+ memcpy(pps_sdp.sb, &dsc_packed_pps[i * 32], 32);
+ enc1->base.vpg->funcs->update_generic_info_packet(
+ enc1->base.vpg,
+ 11 + i,
+ &pps_sdp);
+ }
+
+ /* SW should make sure VBID[6] update line number is bigger
+ * than PPS transmit line number
+ */
+ REG_UPDATE(DP_GSP11_CNTL,
+ DP_SEC_GSP11_LINE_NUM, 2);
+ REG_UPDATE_2(DP_MSA_VBID_MISC,
+ DP_VBID6_LINE_REFERENCE, 0,
+ DP_VBID6_LINE_NUM, 3);
+
+ /* Send PPS data at the line number specified above.
+ * The DP spec requires PPS to be sent only when it changes; however, since
+ * the decoder has to be able to handle a change on every frame, we always
+ * send it (i.e. on every frame) to reduce the chance the decoder misses it.
+ * If it turns out PPS only needs to be sent when it changes, we can use the
+ * DP_SEC_GSP11_SEND register.
+ */
+ REG_UPDATE(DP_GSP11_CNTL,
+ DP_SEC_GSP11_ENABLE, 1);
+ REG_UPDATE(DP_SEC_CNTL,
+ DP_SEC_STREAM_ENABLE, 1);
+ } else {
+ /* Disable Generic Stream Packet 11 (GSP) transmission */
+ REG_UPDATE(DP_GSP11_CNTL, DP_SEC_GSP11_ENABLE, 0);
+ REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP11_PPS, 0);
+ }
+}
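+
+/* The DSC PPS is 128 bytes while each generic SDP payload carries 32 bytes,
+ * so the loop above splits it across generic packets 11-14; hb2 = 127
+ * encodes the 128-byte payload size (length minus one).
+ */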
+
+
+/* Read DSC-related register fields into the given enc_state struct so they
+ * can be logged later by dcn10_log_hw_state.
+ */
+static void enc3_read_state(struct stream_encoder *enc, struct enc_state *s)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ //if dsc is enabled, continue to read
+ REG_GET(DP_DSC_CNTL, DP_DSC_MODE, &s->dsc_mode);
+ if (s->dsc_mode) {
+ REG_GET(DP_DSC_CNTL, DP_DSC_SLICE_WIDTH, &s->dsc_slice_width);
+ REG_GET(DP_GSP11_CNTL, DP_SEC_GSP11_LINE_NUM, &s->sec_gsp_pps_line_num);
+
+ REG_GET(DP_MSA_VBID_MISC, DP_VBID6_LINE_REFERENCE, &s->vbid6_line_reference);
+ REG_GET(DP_MSA_VBID_MISC, DP_VBID6_LINE_NUM, &s->vbid6_line_num);
+
+ REG_GET(DP_GSP11_CNTL, DP_SEC_GSP11_ENABLE, &s->sec_gsp_pps_enable);
+ REG_GET(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, &s->sec_stream_enable);
+ }
+}
+
+static void enc3_stream_encoder_update_dp_info_packets(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ uint32_t value = 0;
+ uint32_t dmdata_packet_enabled = 0;
+
+ if (info_frame->vsc.valid) {
+ enc->vpg->funcs->update_generic_info_packet(
+ enc->vpg,
+ 0, /* packetIndex */
+ &info_frame->vsc);
+ }
+ if (info_frame->spd.valid) {
+ enc->vpg->funcs->update_generic_info_packet(
+ enc->vpg,
+ 2, /* packetIndex */
+ &info_frame->spd);
+ }
+ if (info_frame->hdrsmd.valid) {
+ enc->vpg->funcs->update_generic_info_packet(
+ enc->vpg,
+ 3, /* packetIndex */
+ &info_frame->hdrsmd);
+ }
+ /* packetIndex 4 is used for sending immediate SDP messages; please use
+ * other packet indices (such as 5, 6) for other info packets
+ */
+
+ /* enable/disable transmission of packet(s).
+ * If enabled, packet transmission begins on the next frame
+ */
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid);
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid);
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid);
+
+
+ /* This bit is the master enable bit.
+ * When enabling the secondary stream engine,
+ * this master bit must also be set.
+ * This register is shared with the audio info frame.
+ * Therefore we need to enable the master bit
+ * if at least one of the fields is not 0
+ */
+ value = REG_READ(DP_SEC_CNTL);
+ if (value)
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+
+ /* check if dynamic metadata packet transmission is enabled */
+ REG_GET(DP_SEC_METADATA_TRANSMISSION,
+ DP_SEC_METADATA_PACKET_ENABLE, &dmdata_packet_enabled);
+
+ if (dmdata_packet_enabled)
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+}
+
+static void enc3_dp_set_odm_combine(
+ struct stream_encoder *enc,
+ bool odm_combine)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_COMBINE, odm_combine);
+}
+
+/* setup stream encoder in dvi mode */
+void enc3_stream_encoder_dvi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ bool is_dual_link)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ if (!enc->ctx->dc->debug.avoid_vbios_exec_table) {
+ struct bp_encoder_control cntl = {0};
+
+ cntl.action = ENCODER_CONTROL_SETUP;
+ cntl.engine_id = enc1->base.id;
+ cntl.signal = is_dual_link ?
+ SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK;
+ cntl.enable_dp_audio = false;
+ cntl.pixel_clock = crtc_timing->pix_clk_100hz / 10;
+ cntl.lanes_number = (is_dual_link) ? LANE_COUNT_EIGHT : LANE_COUNT_FOUR;
+
+ if (enc1->base.bp->funcs->encoder_control(
+ enc1->base.bp, &cntl) != BP_RESULT_OK)
+ return;
+
+ } else {
+
+  //Set pattern for clock channel, default value 0x63 does not work
+ REG_UPDATE(DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, 0x1F);
+
+ //DIG_BE_TMDS_DVI_MODE : TMDS-DVI mode is already set in link_encoder_setup
+
+ //DIG_SOURCE_SELECT is already set in dig_connect_to_otg
+
+ /* set DIG_START to 0x1 to reset FIFO */
+ REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
+ udelay(1);
+
+ /* write 0 to take the FIFO out of reset */
+ REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);
+ udelay(1);
+ }
+
+ ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB);
+ ASSERT(crtc_timing->display_color_depth == COLOR_DEPTH_888);
+ enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing);
+}
+
+/* setup stream encoder in hdmi mode */
+static void enc3_stream_encoder_hdmi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ int actual_pix_clk_khz,
+ bool enable_audio)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ if (!enc->ctx->dc->debug.avoid_vbios_exec_table) {
+ struct bp_encoder_control cntl = {0};
+
+ cntl.action = ENCODER_CONTROL_SETUP;
+ cntl.engine_id = enc1->base.id;
+ cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ cntl.enable_dp_audio = enable_audio;
+ cntl.pixel_clock = actual_pix_clk_khz;
+ cntl.lanes_number = LANE_COUNT_FOUR;
+
+ if (enc1->base.bp->funcs->encoder_control(
+ enc1->base.bp, &cntl) != BP_RESULT_OK)
+ return;
+
+ } else {
+
+  //Set pattern for clock channel, default value 0x63 does not work
+ REG_UPDATE(DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, 0x1F);
+
+ //DIG_BE_TMDS_HDMI_MODE : TMDS-HDMI mode is already set in link_encoder_setup
+
+ //DIG_SOURCE_SELECT is already set in dig_connect_to_otg
+
+ /* set DIG_START to 0x1 to reset FIFO */
+ REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
+ udelay(1);
+
+ /* write 0 to take the FIFO out of reset */
+ REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);
+ udelay(1);
+ }
+
+ /* Configure pixel encoding */
+ enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing);
+
+ /* setup HDMI engine */
+ REG_UPDATE_6(HDMI_CONTROL,
+ HDMI_PACKET_GEN_VERSION, 1,
+ HDMI_KEEPOUT_MODE, 1,
+ HDMI_DEEP_COLOR_ENABLE, 0,
+ HDMI_DATA_SCRAMBLE_EN, 0,
+ HDMI_NO_EXTRA_NULL_PACKET_FILLED, 1,
+ HDMI_CLOCK_CHANNEL_RATE, 0);
+
+ /* Configure color depth */
+ switch (crtc_timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ REG_UPDATE(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
+ break;
+ case COLOR_DEPTH_101010:
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 1,
+ HDMI_DEEP_COLOR_ENABLE, 0);
+ } else {
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 1,
+ HDMI_DEEP_COLOR_ENABLE, 1);
+ }
+ break;
+ case COLOR_DEPTH_121212:
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 2,
+ HDMI_DEEP_COLOR_ENABLE, 0);
+ } else {
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 2,
+ HDMI_DEEP_COLOR_ENABLE, 1);
+ }
+ break;
+ case COLOR_DEPTH_161616:
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 3,
+ HDMI_DEEP_COLOR_ENABLE, 1);
+ break;
+ default:
+ break;
+ }
+
+ if (actual_pix_clk_khz >= HDMI_CLOCK_CHANNEL_RATE_MORE_340M) {
+ /* enable HDMI data scrambler
+ * HDMI_CLOCK_CHANNEL_RATE_MORE_340M
+ * Clock channel frequency is 1/4 of character rate.
+ */
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DATA_SCRAMBLE_EN, 1,
+ HDMI_CLOCK_CHANNEL_RATE, 1);
+ } else if (crtc_timing->flags.LTE_340MCSC_SCRAMBLE) {
+
+ /* TODO: New feature for DCE11, still need to implement */
+
+ /* enable HDMI data scrambler
+ * HDMI_CLOCK_CHANNEL_FREQ_EQUAL_TO_CHAR_RATE
+ * Clock channel frequency is the same
+ * as character rate
+ */
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DATA_SCRAMBLE_EN, 1,
+ HDMI_CLOCK_CHANNEL_RATE, 0);
+ }
+
+
+ /* Enable transmission of General Control packet on every frame */
+ REG_UPDATE_3(HDMI_VBI_PACKET_CONTROL,
+ HDMI_GC_CONT, 1,
+ HDMI_GC_SEND, 1,
+ HDMI_NULL_SEND, 1);
+
+ /* following belongs to audio */
+ /* Enable Audio InfoFrame packet transmission. */
+ REG_UPDATE(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
+
+ /* update double-buffered AUDIO_INFO registers immediately */
+ ASSERT(enc->afmt);
+ enc->afmt->funcs->audio_info_immediate_update(enc->afmt);
+
+ /* Select line number on which to send Audio InfoFrame packets */
+ REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE,
+ VBI_LINE_0 + 2);
+
+ /* set HDMI GC AVMUTE */
+ REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, 0);
+}
+
+static void enc3_audio_mute_control(
+ struct stream_encoder *enc,
+ bool mute)
+{
+ ASSERT(enc->afmt);
+ enc->afmt->funcs->audio_mute_control(enc->afmt, mute);
+}
+
+static void enc3_se_dp_audio_setup(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info)
+{
+ ASSERT(enc->afmt);
+ enc->afmt->funcs->se_audio_setup(enc->afmt, az_inst, info);
+}
+
+#define DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT 0x8000
+#define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC 1
+
+static void enc3_se_setup_dp_audio(
+ struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ /* --- DP Audio packet configurations --- */
+
+ /* ATP Configuration */
+ REG_SET(DP_SEC_AUD_N, 0,
+ DP_SEC_AUD_N, DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT);
+
+ /* Async/auto-calc timestamp mode */
+ REG_SET(DP_SEC_TIMESTAMP, 0, DP_SEC_TIMESTAMP_MODE,
+ DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC);
+
+ ASSERT(enc->afmt);
+ enc->afmt->funcs->setup_dp_audio(enc->afmt);
+}
+
+static void enc3_se_dp_audio_enable(
+ struct stream_encoder *enc)
+{
+ enc1_se_enable_audio_clock(enc, true);
+ enc3_se_setup_dp_audio(enc);
+ enc1_se_enable_dp_audio(enc);
+}
+
+static void enc3_se_setup_hdmi_audio(
+ struct stream_encoder *enc,
+ const struct audio_crtc_info *crtc_info)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ struct audio_clock_info audio_clock_info = {0};
+
+ /* Setup audio in AFMT - program AFMT block associated with DIO */
+ ASSERT(enc->afmt);
+ enc->afmt->funcs->setup_hdmi_audio(enc->afmt);
+
+ /* HDMI_AUDIO_PACKET_CONTROL */
+ REG_UPDATE(HDMI_AUDIO_PACKET_CONTROL,
+ HDMI_AUDIO_DELAY_EN, 1);
+
+ /* HDMI_ACR_PACKET_CONTROL */
+ REG_UPDATE_3(HDMI_ACR_PACKET_CONTROL,
+ HDMI_ACR_AUTO_SEND, 1,
+ HDMI_ACR_SOURCE, 0,
+ HDMI_ACR_AUDIO_PRIORITY, 0);
+
+ /* Program audio clock sample/regeneration parameters */
+ get_audio_clock_info(crtc_info->color_depth,
+ crtc_info->requested_pixel_clock_100Hz,
+ crtc_info->calculated_pixel_clock_100Hz,
+ &audio_clock_info);
+ DC_LOG_HW_AUDIO(
+ "\n%s:Input::requested_pixel_clock_100Hz = %d" \
+ "calculated_pixel_clock_100Hz = %d \n", __func__, \
+ crtc_info->requested_pixel_clock_100Hz, \
+ crtc_info->calculated_pixel_clock_100Hz);
+
+ /* HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK */
+ REG_UPDATE(HDMI_ACR_32_0, HDMI_ACR_CTS_32, audio_clock_info.cts_32khz);
+
+ /* HDMI_ACR_32_1__HDMI_ACR_N_32_MASK */
+ REG_UPDATE(HDMI_ACR_32_1, HDMI_ACR_N_32, audio_clock_info.n_32khz);
+
+ /* HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK */
+ REG_UPDATE(HDMI_ACR_44_0, HDMI_ACR_CTS_44, audio_clock_info.cts_44khz);
+
+ /* HDMI_ACR_44_1__HDMI_ACR_N_44_MASK */
+ REG_UPDATE(HDMI_ACR_44_1, HDMI_ACR_N_44, audio_clock_info.n_44khz);
+
+ /* HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK */
+ REG_UPDATE(HDMI_ACR_48_0, HDMI_ACR_CTS_48, audio_clock_info.cts_48khz);
+
+ /* HDMI_ACR_48_1__HDMI_ACR_N_48_MASK */
+ REG_UPDATE(HDMI_ACR_48_1, HDMI_ACR_N_48, audio_clock_info.n_48khz);
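+
+ /* Per the HDMI spec, the sink regenerates the audio clock from these
+ * values as 128 * f_audio = f_TMDS * N / CTS, which is why a CTS/N pair
+ * is programmed for each sample-rate family (32, 44.1 and 48 kHz).
+ */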
+
+ /* Video driver cannot know in advance which sample rate will
+ * be used by the HD Audio driver.
+ * The HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE field is
+ * programmed later in the interrupt callback.
+ */
+}
+
+static void enc3_se_hdmi_audio_setup(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info,
+ struct audio_crtc_info *audio_crtc_info)
+{
+ enc1_se_enable_audio_clock(enc, true);
+ enc3_se_setup_hdmi_audio(enc, audio_crtc_info);
+ ASSERT(enc->afmt);
+ enc->afmt->funcs->se_audio_setup(enc->afmt, az_inst, info);
+}
+
+
+static const struct stream_encoder_funcs dcn30_str_enc_funcs = {
+ .dp_set_odm_combine =
+ enc3_dp_set_odm_combine,
+ .dp_set_stream_attribute =
+ enc2_stream_encoder_dp_set_stream_attribute,
+ .hdmi_set_stream_attribute =
+ enc3_stream_encoder_hdmi_set_stream_attribute,
+ .dvi_set_stream_attribute =
+ enc3_stream_encoder_dvi_set_stream_attribute,
+ .set_mst_bandwidth =
+ enc1_stream_encoder_set_mst_bandwidth,
+ .update_hdmi_info_packets =
+ enc3_stream_encoder_update_hdmi_info_packets,
+ .stop_hdmi_info_packets =
+ enc3_stream_encoder_stop_hdmi_info_packets,
+ .update_dp_info_packets =
+ enc3_stream_encoder_update_dp_info_packets,
+ .stop_dp_info_packets =
+ enc1_stream_encoder_stop_dp_info_packets,
+ .dp_blank =
+ enc1_stream_encoder_dp_blank,
+ .dp_unblank =
+ enc2_stream_encoder_dp_unblank,
+ .audio_mute_control = enc3_audio_mute_control,
+
+ .dp_audio_setup = enc3_se_dp_audio_setup,
+ .dp_audio_enable = enc3_se_dp_audio_enable,
+ .dp_audio_disable = enc1_se_dp_audio_disable,
+
+ .hdmi_audio_setup = enc3_se_hdmi_audio_setup,
+ .hdmi_audio_disable = enc1_se_hdmi_audio_disable,
+ .setup_stereo_sync = enc1_setup_stereo_sync,
+ .set_avmute = enc1_stream_encoder_set_avmute,
+ .dig_connect_to_otg = enc1_dig_connect_to_otg,
+ .dig_source_otg = enc1_dig_source_otg,
+
+ .dp_get_pixel_format = enc1_stream_encoder_dp_get_pixel_format,
+
+ .enc_read_state = enc3_read_state,
+ .dp_set_dsc_config = enc3_dp_set_dsc_config,
+ .dp_set_dsc_pps_info_packet = enc3_dp_set_dsc_pps_info_packet,
+ .set_dynamic_metadata = enc2_set_dynamic_metadata,
+ .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,
+};
+
+void dcn30_dio_stream_encoder_construct(
+ struct dcn10_stream_encoder *enc1,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ enum engine_id eng_id,
+ struct vpg *vpg,
+ struct afmt *afmt,
+ const struct dcn10_stream_enc_registers *regs,
+ const struct dcn10_stream_encoder_shift *se_shift,
+ const struct dcn10_stream_encoder_mask *se_mask)
+{
+ enc1->base.funcs = &dcn30_str_enc_funcs;
+ enc1->base.ctx = ctx;
+ enc1->base.id = eng_id;
+ enc1->base.bp = bp;
+ enc1->base.vpg = vpg;
+ enc1->base.afmt = afmt;
+ enc1->regs = regs;
+ enc1->se_shift = se_shift;
+ enc1->se_mask = se_mask;
+ enc1->base.stream_enc_inst = vpg->inst;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h
new file mode 100644
index 000000000000..8db6d76a1131
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DIO_STREAM_ENCODER_DCN30_H__
+#define __DC_DIO_STREAM_ENCODER_DCN30_H__
+
+#include "dcn30/dcn30_vpg.h"
+#include "dcn30/dcn30_afmt.h"
+#include "stream_encoder.h"
+#include "dcn20/dcn20_stream_encoder.h"
+
+/* Register bit field name change */
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+
+
+#define SE_DCN3_REG_LIST(id)\
+ SRI(AFMT_CNTL, DIG, id), \
+ SRI(DIG_FE_CNTL, DIG, id), \
+ SRI(HDMI_CONTROL, DIG, id), \
+ SRI(HDMI_DB_CONTROL, DIG, id), \
+ SRI(HDMI_GC, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL4, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL5, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL6, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL7, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL8, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL9, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL10, DIG, id), \
+ SRI(HDMI_INFOFRAME_CONTROL0, DIG, id), \
+ SRI(HDMI_INFOFRAME_CONTROL1, DIG, id), \
+ SRI(HDMI_VBI_PACKET_CONTROL, DIG, id), \
+ SRI(HDMI_AUDIO_PACKET_CONTROL, DIG, id),\
+ SRI(HDMI_ACR_PACKET_CONTROL, DIG, id),\
+ SRI(HDMI_ACR_32_0, DIG, id),\
+ SRI(HDMI_ACR_32_1, DIG, id),\
+ SRI(HDMI_ACR_44_0, DIG, id),\
+ SRI(HDMI_ACR_44_1, DIG, id),\
+ SRI(HDMI_ACR_48_0, DIG, id),\
+ SRI(HDMI_ACR_48_1, DIG, id),\
+ SRI(DP_DB_CNTL, DP, id), \
+ SRI(DP_MSA_MISC, DP, id), \
+ SRI(DP_MSA_VBID_MISC, DP, id), \
+ SRI(DP_MSA_COLORIMETRY, DP, id), \
+ SRI(DP_MSA_TIMING_PARAM1, DP, id), \
+ SRI(DP_MSA_TIMING_PARAM2, DP, id), \
+ SRI(DP_MSA_TIMING_PARAM3, DP, id), \
+ SRI(DP_MSA_TIMING_PARAM4, DP, id), \
+ SRI(DP_MSE_RATE_CNTL, DP, id), \
+ SRI(DP_MSE_RATE_UPDATE, DP, id), \
+ SRI(DP_PIXEL_FORMAT, DP, id), \
+ SRI(DP_SEC_CNTL, DP, id), \
+ SRI(DP_SEC_CNTL2, DP, id), \
+ SRI(DP_SEC_CNTL6, DP, id), \
+ SRI(DP_STEER_FIFO, DP, id), \
+ SRI(DP_VID_M, DP, id), \
+ SRI(DP_VID_N, DP, id), \
+ SRI(DP_VID_STREAM_CNTL, DP, id), \
+ SRI(DP_VID_TIMING, DP, id), \
+ SRI(DP_SEC_AUD_N, DP, id), \
+ SRI(DP_SEC_TIMESTAMP, DP, id), \
+ SRI(DP_DSC_CNTL, DP, id), \
+ SRI(DP_DSC_BYTES_PER_PIXEL, DP, id), \
+ SRI(DP_SEC_METADATA_TRANSMISSION, DP, id), \
+ SRI(HDMI_METADATA_PACKET_CONTROL, DIG, id), \
+ SRI(DP_SEC_FRAMING4, DP, id), \
+ SRI(DP_GSP11_CNTL, DP, id), \
+ SRI(DME_CONTROL, DME, id),\
+ SRI(DIG_CLOCK_PATTERN, DIG, id)
+
+
+#define SE_COMMON_MASK_SH_LIST_DCN30_BASE(mask_sh)\
+ SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\
+ SE_SF(DP0_DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_PACKET_GEN_VERSION, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_KEEPOUT_MODE, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_NO_EXTRA_NULL_PACKET_FILLED, mask_sh),\
+ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GC, HDMI_GC_AVMUTE, mask_sh),\
+ SE_SF(DP0_DP_MSE_RATE_CNTL, DP_MSE_RATE_X, mask_sh),\
+ SE_SF(DP0_DP_MSE_RATE_CNTL, DP_MSE_RATE_Y, mask_sh),\
+ SE_SF(DP0_DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP1_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_MPG_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP4_SEND, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP4_SEND_PENDING, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL4, DP_SEC_GSP4_LINE_NUM, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP4_SEND_ANY_LINE, mask_sh),\
+ SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, mask_sh),\
+ SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, mask_sh),\
+ SE_SF(DP0_DP_STEER_FIFO, DP_STEER_FIFO_RESET, mask_sh),\
+ SE_SF(DP0_DP_VID_TIMING, DP_VID_M_N_GEN_EN, mask_sh),\
+ SE_SF(DP0_DP_VID_N, DP_VID_N, mask_sh),\
+ SE_SF(DP0_DP_VID_M, DP_VID_M, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, DIG_START, mask_sh),\
+ SE_SF(DIG0_HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUDIO_PRIORITY, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_32_0, HDMI_ACR_CTS_32, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_32_1, HDMI_ACR_N_32, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_44_0, HDMI_ACR_CTS_44, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_44_1, HDMI_ACR_N_44, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_48_0, HDMI_ACR_CTS_48, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_48_1, HDMI_ACR_N_48, mask_sh),\
+ SE_SF(DP0_DP_SEC_AUD_N, DP_SEC_AUD_N, mask_sh),\
+ SE_SF(DP0_DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ASP_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ATP_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_AIP_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ACM_ENABLE, mask_sh),\
+ SE_SF(DIG0_AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_CLOCK_CHANNEL_RATE, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP4_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP6_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP7_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP7_SEND, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL6, DP_SEC_GSP7_LINE_NUM, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP11_PPS, mask_sh),\
+ SE_SF(DP0_DP_GSP11_CNTL, DP_SEC_GSP11_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_GSP11_CNTL, DP_SEC_GSP11_LINE_NUM, mask_sh),\
+ SE_SF(DP0_DP_DB_CNTL, DP_DB_DISABLE, mask_sh),\
+ SE_SF(DP0_DP_MSA_COLORIMETRY, DP_MSA_MISC0, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_HTOTAL, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_VTOTAL, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM2, DP_MSA_HSTART, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM2, DP_MSA_VSTART, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_HSYNCWIDTH, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_HSYNCPOLARITY, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_VSYNCWIDTH, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_VSYNCPOLARITY, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_HWIDTH, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_VHEIGHT, mask_sh),\
+ SE_SF(DIG0_HDMI_DB_CONTROL, HDMI_DB_DISABLE, mask_sh),\
+ SE_SF(DP0_DP_VID_TIMING, DP_VID_N_MUL, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, DIG_SOURCE_SELECT, mask_sh), \
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC2_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC2_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC3_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC3_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC4_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC4_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC5_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC5_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC6_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC6_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC7_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC7_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC8_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC8_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC9_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC9_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC10_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC10_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC11_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC11_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC12_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC12_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC13_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC13_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC14_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL6, HDMI_GENERIC14_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL1, HDMI_GENERIC0_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL1, HDMI_GENERIC1_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL2, HDMI_GENERIC2_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL2, HDMI_GENERIC3_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL3, HDMI_GENERIC4_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL3, HDMI_GENERIC5_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL4, HDMI_GENERIC6_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL4, HDMI_GENERIC7_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL7, HDMI_GENERIC8_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL7, HDMI_GENERIC9_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL8, HDMI_GENERIC10_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL8, HDMI_GENERIC11_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL9, HDMI_GENERIC12_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL9, HDMI_GENERIC13_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL10, HDMI_GENERIC14_LINE, mask_sh),\
+ SE_SF(DP0_DP_DSC_CNTL, DP_DSC_MODE, mask_sh),\
+ SE_SF(DP0_DP_DSC_CNTL, DP_DSC_SLICE_WIDTH, mask_sh),\
+ SE_SF(DP0_DP_DSC_BYTES_PER_PIXEL, DP_DSC_BYTES_PER_PIXEL, mask_sh),\
+ SE_SF(DP0_DP_MSA_VBID_MISC, DP_VBID6_LINE_REFERENCE, mask_sh),\
+ SE_SF(DP0_DP_MSA_VBID_MISC, DP_VBID6_LINE_NUM, mask_sh),\
+ SE_SF(DME0_DME_CONTROL, METADATA_ENGINE_EN, mask_sh),\
+ SE_SF(DME0_DME_CONTROL, METADATA_HUBP_REQUESTOR_ID, mask_sh),\
+ SE_SF(DME0_DME_CONTROL, METADATA_STREAM_TYPE, mask_sh),\
+ SE_SF(DP0_DP_SEC_METADATA_TRANSMISSION, DP_SEC_METADATA_PACKET_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_METADATA_TRANSMISSION, DP_SEC_METADATA_PACKET_LINE_REFERENCE, mask_sh),\
+ SE_SF(DP0_DP_SEC_METADATA_TRANSMISSION, DP_SEC_METADATA_PACKET_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_METADATA_PACKET_CONTROL, HDMI_METADATA_PACKET_ENABLE, mask_sh),\
+ SE_SF(DIG0_HDMI_METADATA_PACKET_CONTROL, HDMI_METADATA_PACKET_LINE_REFERENCE, mask_sh),\
+ SE_SF(DIG0_HDMI_METADATA_PACKET_CONTROL, HDMI_METADATA_PACKET_LINE, mask_sh),\
+ SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_COMBINE, mask_sh),\
+ SE_SF(DP0_DP_SEC_FRAMING4, DP_SST_SDP_SPLITTING, mask_sh),\
+ SE_SF(DIG0_DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCN30(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_DCN30_BASE(mask_sh)
+
+void dcn30_dio_stream_encoder_construct(
+ struct dcn10_stream_encoder *enc1,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ enum engine_id eng_id,
+ struct vpg *vpg,
+ struct afmt *afmt,
+ const struct dcn10_stream_enc_registers *regs,
+ const struct dcn10_stream_encoder_shift *se_shift,
+ const struct dcn10_stream_encoder_mask *se_mask);
+
+#endif /* __DC_DIO_STREAM_ENCODER_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
new file mode 100644
index 000000000000..0eb881f2e0d6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -0,0 +1,1414 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "core_types.h"
+#include "reg_helper.h"
+#include "dcn30_dpp.h"
+#include "basics/conversion.h"
+#include "dcn30_cm_common.h"
+
+#define REG(reg)\
+ dpp->tf_regs->reg
+
+#define CTX \
+ dpp->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dpp->tf_shift->field_name, dpp->tf_mask->field_name
+
+
+void dpp30_read_state(struct dpp *dpp_base,
+ struct dcn_dpp_state *s)
+{
+ struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+
+ REG_GET(DPP_CONTROL,
+ DPP_CLOCK_ENABLE, &s->is_enabled);
+
+ // TODO: Implement for DCN3
+}
+/* Program the post-scaler CSC block in the DPP CM */
+void dpp3_program_post_csc(
+ struct dpp *dpp_base,
+ enum dc_color_space color_space,
+ enum dcn10_input_csc_select input_select,
+ const struct out_csc_color_matrix *tbl_entry)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ int i;
+ int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix);
+ const uint16_t *regval = NULL;
+ uint32_t cur_select = 0;
+ enum dcn10_input_csc_select select;
+ struct color_matrices_reg gam_regs;
+
+ if (input_select == INPUT_CSC_SELECT_BYPASS) {
+ REG_SET(CM_POST_CSC_CONTROL, 0, CM_POST_CSC_MODE, 0);
+ return;
+ }
+
+ if (tbl_entry == NULL) {
+ for (i = 0; i < arr_size; i++)
+ if (dpp_input_csc_matrix[i].color_space == color_space) {
+ regval = dpp_input_csc_matrix[i].regval;
+ break;
+ }
+
+ if (regval == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ } else {
+ regval = tbl_entry->regval;
+ }
+
+ /* Determine which CSC matrix (ICSC or COMA) is currently in use,
+ * then select the alternate set to double-buffer the CSC update so
+ * the new CSC takes effect on a frame boundary
+ */
+ REG_GET(CM_POST_CSC_CONTROL,
+ CM_POST_CSC_MODE_CURRENT, &cur_select);
+
+ if (cur_select != INPUT_CSC_SELECT_ICSC)
+ select = INPUT_CSC_SELECT_ICSC;
+ else
+ select = INPUT_CSC_SELECT_COMA;
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_POST_CSC_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_POST_CSC_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_POST_CSC_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_POST_CSC_C12;
+
+ if (select == INPUT_CSC_SELECT_ICSC) {
+
+ gam_regs.csc_c11_c12 = REG(CM_POST_CSC_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_POST_CSC_C33_C34);
+
+ } else {
+
+ gam_regs.csc_c11_c12 = REG(CM_POST_CSC_B_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_POST_CSC_B_C33_C34);
+
+ }
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ REG_SET(CM_POST_CSC_CONTROL, 0,
+ CM_POST_CSC_MODE, select);
+}
+
+
+/* The CNVC degamma unit has read-only LUTs */
+void dpp3_set_pre_degam(struct dpp *dpp_base, enum dc_transfer_func_predefined tr)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ int pre_degam_en = 1;
+ int degamma_lut_selection = 0;
+
+ switch (tr) {
+ case TRANSFER_FUNCTION_LINEAR:
+ case TRANSFER_FUNCTION_UNITY:
+ pre_degam_en = 0; //bypass
+ break;
+ case TRANSFER_FUNCTION_SRGB:
+ degamma_lut_selection = 0;
+ break;
+ case TRANSFER_FUNCTION_BT709:
+ degamma_lut_selection = 4;
+ break;
+ case TRANSFER_FUNCTION_PQ:
+ degamma_lut_selection = 5;
+ break;
+ case TRANSFER_FUNCTION_HLG:
+ degamma_lut_selection = 6;
+ break;
+ case TRANSFER_FUNCTION_GAMMA22:
+ degamma_lut_selection = 1;
+ break;
+ case TRANSFER_FUNCTION_GAMMA24:
+ degamma_lut_selection = 2;
+ break;
+ case TRANSFER_FUNCTION_GAMMA26:
+ degamma_lut_selection = 3;
+ break;
+ default:
+ pre_degam_en = 0;
+ break;
+ }
+
+ REG_SET_2(PRE_DEGAM, 0,
+ PRE_DEGAM_MODE, pre_degam_en,
+ PRE_DEGAM_SELECT, degamma_lut_selection);
+}
+
+static void dpp3_cnv_setup (
+ struct dpp *dpp_base,
+ enum surface_pixel_format format,
+ enum expansion_mode mode,
+ struct dc_csc_transform input_csc_color_matrix,
+ enum dc_color_space input_color_space,
+ struct cnv_alpha_2bit_lut *alpha_2bit_lut)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ uint32_t pixel_format = 0;
+ uint32_t alpha_en = 1;
+ enum dc_color_space color_space = COLOR_SPACE_SRGB;
+ enum dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS;
+ bool force_disable_cursor = false;
+ uint32_t is_2bit = 0;
+ uint32_t alpha_plane_enable = 0;
+ uint32_t dealpha_en = 0, dealpha_ablnd_en = 0;
+ uint32_t realpha_en = 0, realpha_ablnd_en = 0;
+ uint32_t program_prealpha_dealpha = 0;
+ struct out_csc_color_matrix tbl_entry;
+ int i;
+
+ REG_SET_2(FORMAT_CONTROL, 0,
+ CNVC_BYPASS, 0,
+ FORMAT_EXPANSION_MODE, mode);
+
+ REG_UPDATE(FORMAT_CONTROL, FORMAT_CNV16, 0);
+ REG_UPDATE(FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, 0);
+ REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE, 0);
+ REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE_C, 0);
+
+ REG_UPDATE(FORMAT_CONTROL, FORMAT_CROSSBAR_R, 0);
+ REG_UPDATE(FORMAT_CONTROL, FORMAT_CROSSBAR_G, 1);
+ REG_UPDATE(FORMAT_CONTROL, FORMAT_CROSSBAR_B, 2);
+
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ pixel_format = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ pixel_format = 3;
+ alpha_en = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ pixel_format = 8;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ pixel_format = 10;
+ is_2bit = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ force_disable_cursor = false;
+ pixel_format = 65;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ force_disable_cursor = true;
+ pixel_format = 64;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ force_disable_cursor = true;
+ pixel_format = 67;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ force_disable_cursor = true;
+ pixel_format = 66;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ pixel_format = 22;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ pixel_format = 24;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ pixel_format = 25;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
+ pixel_format = 12;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
+ pixel_format = 112;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
+ pixel_format = 113;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
+ pixel_format = 114;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ is_2bit = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102:
+ pixel_format = 115;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ is_2bit = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
+ pixel_format = 116;
+ alpha_plane_enable = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
+ pixel_format = 116;
+ alpha_plane_enable = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
+ pixel_format = 118;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
+ pixel_format = 119;
+ break;
+ default:
+ break;
+ }
+
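+ /* Formats flagged as is_2bit carry a 2-bit alpha channel; the four LUT
+ * entries below define the full alpha value each 2-bit code expands to.
+ */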
+ if (is_2bit == 1 && alpha_2bit_lut != NULL) {
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0);
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1);
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, alpha_2bit_lut->lut2);
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, alpha_2bit_lut->lut3);
+ }
+
+ REG_SET_2(CNVC_SURFACE_PIXEL_FORMAT, 0,
+ CNVC_SURFACE_PIXEL_FORMAT, pixel_format,
+ CNVC_ALPHA_PLANE_ENABLE, alpha_plane_enable);
+ REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en);
+
+ if (program_prealpha_dealpha) {
+ dealpha_en = 1;
+ realpha_en = 1;
+ }
+ REG_SET_2(PRE_DEALPHA, 0,
+ PRE_DEALPHA_EN, dealpha_en,
+ PRE_DEALPHA_ABLND_EN, dealpha_ablnd_en);
+ REG_SET_2(PRE_REALPHA, 0,
+ PRE_REALPHA_EN, realpha_en,
+ PRE_REALPHA_ABLND_EN, realpha_ablnd_en);
+
+ /* If input adjustment exists, program the ICSC with those values. */
+ if (input_csc_color_matrix.enable_adjustment == true) {
+ for (i = 0; i < 12; i++)
+ tbl_entry.regval[i] = input_csc_color_matrix.matrix[i];
+
+ tbl_entry.color_space = input_color_space;
+
+ if (color_space >= COLOR_SPACE_YCBCR601)
+ select = INPUT_CSC_SELECT_ICSC;
+ else
+ select = INPUT_CSC_SELECT_BYPASS;
+
+ dpp3_program_post_csc(dpp_base, color_space, select,
+ &tbl_entry);
+ } else {
+ dpp3_program_post_csc(dpp_base, color_space, select, NULL);
+ }
+
+ if (force_disable_cursor) {
+ REG_UPDATE(CURSOR_CONTROL,
+ CURSOR_ENABLE, 0);
+ REG_UPDATE(CURSOR0_CONTROL,
+ CUR0_ENABLE, 0);
+ }
+}
+
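+/* A ratio is treated as an identity (1:1) scale when its u3.19 fixed-point
+ * representation equals exactly 1.0, i.e. 1 << 19.
+ */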
+#define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19))
+
+void dpp3_set_cursor_attributes(
+ struct dpp *dpp_base,
+ struct dc_cursor_attributes *cursor_attributes)
+{
+ enum dc_cursor_color_format color_format = cursor_attributes->color_format;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ int cur_rom_en = 0;
+
+ if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
+ color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA)
+ cur_rom_en = 1;
+
+ REG_UPDATE_3(CURSOR0_CONTROL,
+ CUR0_MODE, color_format,
+ CUR0_EXPANSION_MODE, 0,
+ CUR0_ROM_EN, cur_rom_en);
+
+ if (color_format == CURSOR_MODE_MONO) {
+ /* todo: clarify what to program these to */
+ REG_UPDATE(CURSOR0_COLOR0,
+ CUR0_COLOR0, 0x00000000);
+ REG_UPDATE(CURSOR0_COLOR1,
+ CUR0_COLOR1, 0xFFFFFFFF);
+ }
+}
+
+
+bool dpp3_get_optimal_number_of_taps(
+ struct dpp *dpp,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps)
+{
+ int num_part_y, num_part_c;
+ int max_taps_y, max_taps_c;
+ int min_taps_y, min_taps_c;
+ enum lb_memory_config lb_config;
+
+ /* Some ASICs do not support FP16 scaling, so we reject modes that require it */
+ if (scl_data->viewport.width != scl_data->h_active &&
+ scl_data->viewport.height != scl_data->v_active &&
+ dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
+ scl_data->format == PIXEL_FORMAT_FP16)
+ return false;
+
+ if (scl_data->viewport.width > scl_data->h_active &&
+ dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
+ scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
+ return false;
+
+ /*
+ * Set default taps if none are provided
+ * From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling
+ * taps = 4 for upscaling
+ */
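+ /* For example, a 2:1 downscale (ratio 2.0) gives min(2 * 2, 8) = 4 taps
+ * and a 6:1 downscale clamps at 8, while upscaling defaults to 4 taps.
+ */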
+ if (in_taps->h_taps == 0) {
+ if (dc_fixpt_ceil(scl_data->ratios.horz) > 1)
+ scl_data->taps.h_taps = min(2 * dc_fixpt_ceil(scl_data->ratios.horz), 8);
+ else
+ scl_data->taps.h_taps = 4;
+ } else
+ scl_data->taps.h_taps = in_taps->h_taps;
+ if (in_taps->v_taps == 0) {
+ if (dc_fixpt_ceil(scl_data->ratios.vert) > 1)
+ scl_data->taps.v_taps = min(dc_fixpt_ceil(dc_fixpt_mul_int(scl_data->ratios.vert, 2)), 8);
+ else
+ scl_data->taps.v_taps = 4;
+ } else
+ scl_data->taps.v_taps = in_taps->v_taps;
+ if (in_taps->v_taps_c == 0) {
+ if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 1)
+ scl_data->taps.v_taps_c = min(dc_fixpt_ceil(dc_fixpt_mul_int(scl_data->ratios.vert_c, 2)), 8);
+ else
+ scl_data->taps.v_taps_c = 4;
+ } else
+ scl_data->taps.v_taps_c = in_taps->v_taps_c;
+ if (in_taps->h_taps_c == 0) {
+ if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 1)
+ scl_data->taps.h_taps_c = min(2 * dc_fixpt_ceil(scl_data->ratios.horz_c), 8);
+ else
+ scl_data->taps.h_taps_c = 4;
+ } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1)
+ /* Only 1 and even h_taps_c are supported by hw */
+ scl_data->taps.h_taps_c = in_taps->h_taps_c - 1;
+ else
+ scl_data->taps.h_taps_c = in_taps->h_taps_c;
+
+ /*Ensure we can support the requested number of vtaps*/
+ min_taps_y = dc_fixpt_ceil(scl_data->ratios.vert);
+ min_taps_c = dc_fixpt_ceil(scl_data->ratios.vert_c);
+
+ /* Use LB_MEMORY_CONFIG_3 for 4:2:0 */
+ if ((scl_data->format == PIXEL_FORMAT_420BPP8) || (scl_data->format == PIXEL_FORMAT_420BPP10))
+ lb_config = LB_MEMORY_CONFIG_3;
+ else
+ lb_config = LB_MEMORY_CONFIG_0;
+
+ dpp->caps->dscl_calc_lb_num_partitions(
+ scl_data, lb_config, &num_part_y, &num_part_c);
+
+ /* MAX_V_TAPS = MIN (NUM_LINES - MAX(CEILING(V_RATIO,1)-2, 0), 8) */
+ if (dc_fixpt_ceil(scl_data->ratios.vert) > 2)
+ max_taps_y = num_part_y - (dc_fixpt_ceil(scl_data->ratios.vert) - 2);
+ else
+ max_taps_y = num_part_y;
+
+ if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 2)
+ max_taps_c = num_part_c - (dc_fixpt_ceil(scl_data->ratios.vert_c) - 2);
+ else
+ max_taps_c = num_part_c;
+
+ if (max_taps_y < min_taps_y)
+ return false;
+ else if (max_taps_c < min_taps_c)
+ return false;
+
+ if (scl_data->taps.v_taps > max_taps_y)
+ scl_data->taps.v_taps = max_taps_y;
+
+ if (scl_data->taps.v_taps_c > max_taps_c)
+ scl_data->taps.v_taps_c = max_taps_c;
+
+ if (!dpp->ctx->dc->debug.always_scale) {
+ if (IDENTITY_RATIO(scl_data->ratios.horz))
+ scl_data->taps.h_taps = 1;
+ if (IDENTITY_RATIO(scl_data->ratios.vert))
+ scl_data->taps.v_taps = 1;
+ if (IDENTITY_RATIO(scl_data->ratios.horz_c))
+ scl_data->taps.h_taps_c = 1;
+ if (IDENTITY_RATIO(scl_data->ratios.vert_c))
+ scl_data->taps.v_taps_c = 1;
+ }
+
+ return true;
+}
+
+void dpp3_cnv_set_bias_scale(
+ struct dpp *dpp_base,
+ struct dc_bias_and_scale *bias_and_scale)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_UPDATE(FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, bias_and_scale->bias_red);
+ REG_UPDATE(FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, bias_and_scale->bias_green);
+ REG_UPDATE(FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, bias_and_scale->bias_blue);
+ REG_UPDATE(FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, bias_and_scale->scale_red);
+ REG_UPDATE(FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, bias_and_scale->scale_green);
+ REG_UPDATE(FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, bias_and_scale->scale_blue);
+}
+
+static void dpp3_power_on_blnd_lut(
+ struct dpp *dpp_base,
+ bool power_on)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_SET(CM_MEM_PWR_CTRL, 0,
+  BLNDGAM_MEM_PWR_FORCE, power_on ? 0 : 1);
+}
+
+static void dpp3_configure_blnd_lut(
+ struct dpp *dpp_base,
+ bool is_ram_a)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_UPDATE_2(CM_BLNDGAM_LUT_CONTROL,
+ CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 7,
+ CM_BLNDGAM_LUT_HOST_SEL, is_ram_a == true ? 0 : 1);
+
+ REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0);
+}
+
+static void dpp3_program_blnd_pwl(
+ struct dpp *dpp_base,
+ const struct pwl_result_data *rgb,
+ uint32_t num)
+{
+ uint32_t i;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg;
+ uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg;
+ uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg;
+
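+
+ /*
+ * If all three channels share the same curve a single pass with the full
+ * write color mask is enough; otherwise each channel is written on its
+ * own by narrowing CM_BLNDGAM_LUT_WRITE_COLOR_MASK (4 = red, 2 = green,
+ * 1 = blue).
+ */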
+ if (is_rgb_equal(rgb, num)) {
+ for (i = 0 ; i < num; i++)
+ REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg);
+ REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red);
+ } else {
+ REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 4);
+ for (i = 0 ; i < num; i++)
+ REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg);
+ REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red);
+
+ REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 2);
+ for (i = 0 ; i < num; i++)
+ REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].green_reg);
+ REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_green);
+
+ REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 1);
+ for (i = 0 ; i < num; i++)
+ REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].blue_reg);
+ REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_blue);
+ }
+}
+
+static void dcn3_dpp_cm_get_reg_field(
+ struct dcn3_dpp *dpp,
+ struct dcn3_xfer_func_reg *reg)
+{
+ reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
+ reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
+
+ reg->shifts.field_region_end = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_B;
+ reg->masks.field_region_end = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_B;
+ reg->shifts.field_region_end_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B;
+ reg->masks.field_region_end_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B;
+ reg->shifts.field_region_end_base = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B;
+ reg->masks.field_region_end_base = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B;
+ reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B;
+ reg->masks.field_region_linear_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B;
+ reg->shifts.exp_region_start = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_B;
+ reg->masks.exp_region_start = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_B;
+ reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B;
+ reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B;
+}
+
+/* program blnd lut RAM A */
+static void dpp3_program_blnd_luta_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ struct dcn3_xfer_func_reg gam_regs;
+
+ dcn3_dpp_cm_get_reg_field(dpp, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMA_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMA_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMA_START_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(CM_BLNDGAM_RAMA_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMA_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(CM_BLNDGAM_RAMA_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMA_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMA_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMA_END_CNTL2_R);
+ gam_regs.region_start = REG(CM_BLNDGAM_RAMA_REGION_0_1);
+ gam_regs.region_end = REG(CM_BLNDGAM_RAMA_REGION_32_33);
+
+ cm_helper_program_gamcor_xfer_func(dpp->base.ctx, params, &gam_regs);
+}
+
+/* program blnd lut RAM B */
+static void dpp3_program_blnd_lutb_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ struct dcn3_xfer_func_reg gam_regs;
+
+ dcn3_dpp_cm_get_reg_field(dpp, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMB_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMB_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMB_START_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(CM_BLNDGAM_RAMB_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMB_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(CM_BLNDGAM_RAMB_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMB_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMB_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMB_END_CNTL2_R);
+ gam_regs.region_start = REG(CM_BLNDGAM_RAMB_REGION_0_1);
+ gam_regs.region_end = REG(CM_BLNDGAM_RAMB_REGION_32_33);
+
+ cm_helper_program_gamcor_xfer_func(dpp->base.ctx, params, &gam_regs);
+}
+
+static enum dc_lut_mode dpp3_get_blndgam_current(struct dpp *dpp_base)
+{
+ enum dc_lut_mode mode;
+ uint32_t mode_current = 0;
+ uint32_t in_use = 0;
+
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_GET(CM_BLNDGAM_CONTROL,
+ CM_BLNDGAM_MODE_CURRENT, &mode_current);
+ REG_GET(CM_BLNDGAM_CONTROL,
+ CM_BLNDGAM_SELECT_CURRENT, &in_use);
+
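+
+ /*
+ * Modes 0 and 1 are treated as bypass; mode 2 means a PWL RAM is active
+ * and SELECT_CURRENT reports which bank is in use (0 = RAM A, 1 = RAM B).
+ */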
+ switch (mode_current) {
+ case 0:
+ case 1:
+ mode = LUT_BYPASS;
+ break;
+
+ case 2:
+ if (in_use == 0)
+ mode = LUT_RAM_A;
+ else
+ mode = LUT_RAM_B;
+ break;
+ default:
+ mode = LUT_BYPASS;
+ break;
+ }
+ return mode;
+}
+
+bool dpp3_program_blnd_lut(
+ struct dpp *dpp_base, const struct pwl_params *params)
+{
+ enum dc_lut_mode current_mode;
+ enum dc_lut_mode next_mode;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ if (params == NULL) {
+ REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_MODE, 0);
+ return false;
+ }
+
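+
+ /*
+ * The blend gamma LUT is double buffered: program whichever RAM is not
+ * currently in use, then switch the mux over to the freshly written bank.
+ */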
+ current_mode = dpp3_get_blndgam_current(dpp_base);
+ if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_B)
+ next_mode = LUT_RAM_A;
+ else
+ next_mode = LUT_RAM_B;
+
+ dpp3_power_on_blnd_lut(dpp_base, true);
+ dpp3_configure_blnd_lut(dpp_base, next_mode == LUT_RAM_A ? true:false);
+
+ if (next_mode == LUT_RAM_A)
+ dpp3_program_blnd_luta_settings(dpp_base, params);
+ else
+ dpp3_program_blnd_lutb_settings(dpp_base, params);
+
+ dpp3_program_blnd_pwl(
+ dpp_base, params->rgb_resulted, params->hw_points_num);
+
+ REG_UPDATE_2(CM_BLNDGAM_CONTROL,
+ CM_BLNDGAM_MODE, 2,
+ CM_BLNDGAM_SELECT, next_mode == LUT_RAM_A ? 0 : 1);
+
+ return true;
+}
+
+
+static void dpp3_program_shaper_lut(
+ struct dpp *dpp_base,
+ const struct pwl_result_data *rgb,
+ uint32_t num)
+{
+ uint32_t i, red, green, blue;
+ uint32_t red_delta, green_delta, blue_delta;
+ uint32_t red_value, green_value, blue_value;
+
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
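+
+ /*
+ * Each shaper LUT word packs a 14-bit base value together with the
+ * 10-bit delta to the next point: (delta << 14) | base, written once
+ * per color channel.
+ */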
+ for (i = 0 ; i < num; i++) {
+
+ red = rgb[i].red_reg;
+ green = rgb[i].green_reg;
+ blue = rgb[i].blue_reg;
+
+ red_delta = rgb[i].delta_red_reg;
+ green_delta = rgb[i].delta_green_reg;
+ blue_delta = rgb[i].delta_blue_reg;
+
+ red_value = ((red_delta & 0x3ff) << 14) | (red & 0x3fff);
+ green_value = ((green_delta & 0x3ff) << 14) | (green & 0x3fff);
+ blue_value = ((blue_delta & 0x3ff) << 14) | (blue & 0x3fff);
+
+ REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, red_value);
+ REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, green_value);
+ REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, blue_value);
+ }
+}
+
+static enum dc_lut_mode dpp3_get_shaper_current(struct dpp *dpp_base)
+{
+ enum dc_lut_mode mode;
+ uint32_t state_mode;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_GET(CM_SHAPER_CONTROL,
+ CM_SHAPER_MODE_CURRENT, &state_mode);
+
+ switch (state_mode) {
+ case 0:
+ mode = LUT_BYPASS;
+ break;
+ case 1:
+ mode = LUT_RAM_A;
+ break;
+ case 2:
+ mode = LUT_RAM_B;
+ break;
+ default:
+ mode = LUT_BYPASS;
+ break;
+ }
+ return mode;
+}
+
+static void dpp3_configure_shaper_lut(
+ struct dpp *dpp_base,
+ bool is_ram_a)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK,
+ CM_SHAPER_LUT_WRITE_EN_MASK, 7);
+ REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK,
+ CM_SHAPER_LUT_WRITE_SEL, is_ram_a == true ? 0:1);
+ REG_SET(CM_SHAPER_LUT_INDEX, 0, CM_SHAPER_LUT_INDEX, 0);
+}
+
+/* program shaper RAM A */
+static void dpp3_program_shaper_luta_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ const struct gamma_curve *curve;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_SET_2(CM_SHAPER_RAMA_START_CNTL_B, 0,
+ CM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x,
+ CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0);
+ REG_SET_2(CM_SHAPER_RAMA_START_CNTL_G, 0,
+ CM_SHAPER_RAMA_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x,
+ CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G, 0);
+ REG_SET_2(CM_SHAPER_RAMA_START_CNTL_R, 0,
+ CM_SHAPER_RAMA_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x,
+ CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R, 0);
+
+ REG_SET_2(CM_SHAPER_RAMA_END_CNTL_B, 0,
+ CM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x,
+ CM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y);
+
+ REG_SET_2(CM_SHAPER_RAMA_END_CNTL_G, 0,
+ CM_SHAPER_RAMA_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x,
+ CM_SHAPER_RAMA_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y);
+
+ REG_SET_2(CM_SHAPER_RAMA_END_CNTL_R, 0,
+ CM_SHAPER_RAMA_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x,
+ CM_SHAPER_RAMA_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y);
+
+ curve = params->arr_curve_points;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_0_1, 0,
+ CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_2_3, 0,
+ CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_4_5, 0,
+ CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_6_7, 0,
+ CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_8_9, 0,
+ CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_10_11, 0,
+ CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_12_13, 0,
+ CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_14_15, 0,
+ CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_16_17, 0,
+ CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_18_19, 0,
+ CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_20_21, 0,
+ CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_22_23, 0,
+ CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_24_25, 0,
+ CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_26_27, 0,
+ CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_28_29, 0,
+ CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_30_31, 0,
+ CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMA_REGION_32_33, 0,
+ CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num);
+}
+
+/* program shaper RAM B */
+static void dpp3_program_shaper_lutb_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ const struct gamma_curve *curve;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_SET_2(CM_SHAPER_RAMB_START_CNTL_B, 0,
+ CM_SHAPER_RAMB_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x,
+ CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B, 0);
+ REG_SET_2(CM_SHAPER_RAMB_START_CNTL_G, 0,
+ CM_SHAPER_RAMB_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x,
+ CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G, 0);
+ REG_SET_2(CM_SHAPER_RAMB_START_CNTL_R, 0,
+ CM_SHAPER_RAMB_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x,
+ CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R, 0);
+
+ REG_SET_2(CM_SHAPER_RAMB_END_CNTL_B, 0,
+ CM_SHAPER_RAMB_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x,
+ CM_SHAPER_RAMB_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y);
+
+ REG_SET_2(CM_SHAPER_RAMB_END_CNTL_G, 0,
+ CM_SHAPER_RAMB_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x,
+ CM_SHAPER_RAMB_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y);
+
+ REG_SET_2(CM_SHAPER_RAMB_END_CNTL_R, 0,
+ CM_SHAPER_RAMB_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x,
+ CM_SHAPER_RAMB_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y);
+
+ curve = params->arr_curve_points;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_0_1, 0,
+ CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_2_3, 0,
+ CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_4_5, 0,
+ CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_6_7, 0,
+ CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_8_9, 0,
+ CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_10_11, 0,
+ CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_12_13, 0,
+ CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_14_15, 0,
+ CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_16_17, 0,
+ CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_18_19, 0,
+ CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_20_21, 0,
+ CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_22_23, 0,
+ CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_24_25, 0,
+ CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_26_27, 0,
+ CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_28_29, 0,
+ CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_30_31, 0,
+ CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num);
+
+ curve += 2;
+ REG_SET_4(CM_SHAPER_RAMB_REGION_32_33, 0,
+ CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET, curve[0].offset,
+ CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num,
+ CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET, curve[1].offset,
+ CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num);
+}
+
+
+bool dpp3_program_shaper(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ enum dc_lut_mode current_mode;
+ enum dc_lut_mode next_mode;
+
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ if (params == NULL) {
+ REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, 0);
+ return false;
+ }
+ current_mode = dpp3_get_shaper_current(dpp_base);
+
+ if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
+ next_mode = LUT_RAM_B;
+ else
+ next_mode = LUT_RAM_A;
+
+ dpp3_configure_shaper_lut(dpp_base, next_mode == LUT_RAM_A ? true:false);
+
+ if (next_mode == LUT_RAM_A)
+ dpp3_program_shaper_luta_settings(dpp_base, params);
+ else
+ dpp3_program_shaper_lutb_settings(dpp_base, params);
+
+ dpp3_program_shaper_lut(
+ dpp_base, params->rgb_resulted, params->hw_points_num);
+
+ REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, next_mode == LUT_RAM_A ? 1:2);
+
+ return true;
+}
+
+static enum dc_lut_mode get3dlut_config(
+ struct dpp *dpp_base,
+ bool *is_17x17x17,
+ bool *is_12bits_color_channel)
+{
+ uint32_t i_mode, i_enable_10bits, lut_size;
+ enum dc_lut_mode mode;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_GET(CM_3DLUT_READ_WRITE_CONTROL,
+ CM_3DLUT_30BIT_EN, &i_enable_10bits);
+ REG_GET(CM_3DLUT_MODE,
+ CM_3DLUT_MODE_CURRENT, &i_mode);
+
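+
+ /*
+ * CM_3DLUT_30BIT_EN set means 10 bits per component (30-bit entries),
+ * clear means 12-bit components; CM_3DLUT_SIZE 0 selects the 17x17x17
+ * table, 1 the 9x9x9 table.
+ */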
+ switch (i_mode) {
+ case 0:
+ mode = LUT_BYPASS;
+ break;
+ case 1:
+ mode = LUT_RAM_A;
+ break;
+ case 2:
+ mode = LUT_RAM_B;
+ break;
+ default:
+ mode = LUT_BYPASS;
+ break;
+ }
+ if (i_enable_10bits > 0)
+ *is_12bits_color_channel = false;
+ else
+ *is_12bits_color_channel = true;
+
+ REG_GET(CM_3DLUT_MODE, CM_3DLUT_SIZE, &lut_size);
+
+ if (lut_size == 0)
+ *is_17x17x17 = true;
+ else
+ *is_17x17x17 = false;
+
+ return mode;
+}
+/*
+ * select ramA or ramB, or bypass
+ * select color channel size 10 or 12 bits
+ * select 3dlut size 17x17x17 or 9x9x9
+ */
+static void dpp3_set_3dlut_mode(
+ struct dpp *dpp_base,
+ enum dc_lut_mode mode,
+ bool is_color_channel_12bits,
+ bool is_lut_size17x17x17)
+{
+ uint32_t lut_mode;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ if (mode == LUT_BYPASS)
+ lut_mode = 0;
+ else if (mode == LUT_RAM_A)
+ lut_mode = 1;
+ else
+ lut_mode = 2;
+
+ REG_UPDATE_2(CM_3DLUT_MODE,
+ CM_3DLUT_MODE, lut_mode,
+ CM_3DLUT_SIZE, is_lut_size17x17x17 == true ? 0 : 1);
+}
+
+static void dpp3_select_3dlut_ram(
+ struct dpp *dpp_base,
+ enum dc_lut_mode mode,
+ bool is_color_channel_12bits)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_UPDATE_2(CM_3DLUT_READ_WRITE_CONTROL,
+ CM_3DLUT_RAM_SEL, mode == LUT_RAM_A ? 0 : 1,
+ CM_3DLUT_30BIT_EN,
+ is_color_channel_12bits == true ? 0:1);
+}
+
+
+
+static void dpp3_set3dlut_ram12(
+ struct dpp *dpp_base,
+ const struct dc_rgb *lut,
+ uint32_t entries)
+{
+ uint32_t i, red, green, blue, red1, green1, blue1;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
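+
+ /*
+ * Two LUT entries are written per register access. The 12-bit components
+ * appear to be left justified in the CM_3DLUT_DATA fields, hence the
+ * shift by 4.
+ */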
+ for (i = 0 ; i < entries; i += 2) {
+ red = lut[i].red<<4;
+ green = lut[i].green<<4;
+ blue = lut[i].blue<<4;
+ red1 = lut[i+1].red<<4;
+ green1 = lut[i+1].green<<4;
+ blue1 = lut[i+1].blue<<4;
+
+ REG_SET_2(CM_3DLUT_DATA, 0,
+ CM_3DLUT_DATA0, red,
+ CM_3DLUT_DATA1, red1);
+
+ REG_SET_2(CM_3DLUT_DATA, 0,
+ CM_3DLUT_DATA0, green,
+ CM_3DLUT_DATA1, green1);
+
+ REG_SET_2(CM_3DLUT_DATA, 0,
+ CM_3DLUT_DATA0, blue,
+ CM_3DLUT_DATA1, blue1);
+ }
+}
+
+/*
+ * load selected lut with 10 bits color channels
+ */
+static void dpp3_set3dlut_ram10(
+ struct dpp *dpp_base,
+ const struct dc_rgb *lut,
+ uint32_t entries)
+{
+ uint32_t i, red, green, blue, value;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
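+
+ /* Pack the three 10-bit components into one 30-bit word per entry. */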
+ for (i = 0; i < entries; i++) {
+ red = lut[i].red;
+ green = lut[i].green;
+ blue = lut[i].blue;
+
+ value = (red<<20) | (green<<10) | blue;
+
+ REG_SET(CM_3DLUT_DATA_30BIT, 0, CM_3DLUT_DATA_30BIT, value);
+ }
+}
+
+
+static void dpp3_select_3dlut_ram_mask(
+ struct dpp *dpp_base,
+ uint32_t ram_selection_mask)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_UPDATE(CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK,
+ ram_selection_mask);
+ REG_SET(CM_3DLUT_INDEX, 0, CM_3DLUT_INDEX, 0);
+}
+
+bool dpp3_program_3dlut(
+ struct dpp *dpp_base,
+ struct tetrahedral_params *params)
+{
+ enum dc_lut_mode mode;
+ bool is_17x17x17;
+ bool is_12bits_color_channel;
+ struct dc_rgb *lut0;
+ struct dc_rgb *lut1;
+ struct dc_rgb *lut2;
+ struct dc_rgb *lut3;
+ int lut_size0;
+ int lut_size;
+
+ if (params == NULL) {
+ dpp3_set_3dlut_mode(dpp_base, LUT_BYPASS, false, false);
+ return false;
+ }
+ mode = get3dlut_config(dpp_base, &is_17x17x17, &is_12bits_color_channel);
+
+ if (mode == LUT_BYPASS || mode == LUT_RAM_B)
+ mode = LUT_RAM_A;
+ else
+ mode = LUT_RAM_B;
+
+ is_17x17x17 = !params->use_tetrahedral_9;
+ is_12bits_color_channel = params->use_12bits;
+ if (is_17x17x17) {
+ lut0 = params->tetrahedral_17.lut0;
+ lut1 = params->tetrahedral_17.lut1;
+ lut2 = params->tetrahedral_17.lut2;
+ lut3 = params->tetrahedral_17.lut3;
+ lut_size0 = sizeof(params->tetrahedral_17.lut0)/
+ sizeof(params->tetrahedral_17.lut0[0]);
+ lut_size = sizeof(params->tetrahedral_17.lut1)/
+ sizeof(params->tetrahedral_17.lut1[0]);
+ } else {
+ lut0 = params->tetrahedral_9.lut0;
+ lut1 = params->tetrahedral_9.lut1;
+ lut2 = params->tetrahedral_9.lut2;
+ lut3 = params->tetrahedral_9.lut3;
+ lut_size0 = sizeof(params->tetrahedral_9.lut0)/
+ sizeof(params->tetrahedral_9.lut0[0]);
+ lut_size = sizeof(params->tetrahedral_9.lut1)/
+ sizeof(params->tetrahedral_9.lut1[0]);
+ }
+
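+
+ /*
+ * The 3D LUT is split across four RAM banks (lut0..lut3); each bank is
+ * enabled for writing through its bit in the write enable mask.
+ */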
+ dpp3_select_3dlut_ram(dpp_base, mode,
+ is_12bits_color_channel);
+ dpp3_select_3dlut_ram_mask(dpp_base, 0x1);
+ if (is_12bits_color_channel)
+ dpp3_set3dlut_ram12(dpp_base, lut0, lut_size0);
+ else
+ dpp3_set3dlut_ram10(dpp_base, lut0, lut_size0);
+
+ dpp3_select_3dlut_ram_mask(dpp_base, 0x2);
+ if (is_12bits_color_channel)
+ dpp3_set3dlut_ram12(dpp_base, lut1, lut_size);
+ else
+ dpp3_set3dlut_ram10(dpp_base, lut1, lut_size);
+
+ dpp3_select_3dlut_ram_mask(dpp_base, 0x4);
+ if (is_12bits_color_channel)
+ dpp3_set3dlut_ram12(dpp_base, lut2, lut_size);
+ else
+ dpp3_set3dlut_ram10(dpp_base, lut2, lut_size);
+
+ dpp3_select_3dlut_ram_mask(dpp_base, 0x8);
+ if (is_12bits_color_channel)
+ dpp3_set3dlut_ram12(dpp_base, lut3, lut_size);
+ else
+ dpp3_set3dlut_ram10(dpp_base, lut3, lut_size);
+
+
+ dpp3_set_3dlut_mode(dpp_base, mode, is_12bits_color_channel,
+ is_17x17x17);
+
+ return true;
+}
+static struct dpp_funcs dcn30_dpp_funcs = {
+ .dpp_program_gamcor_lut = dpp3_program_gamcor_lut,
+ .dpp_read_state = dpp30_read_state,
+ .dpp_reset = dpp_reset,
+ .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
+ .dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps,
+ .dpp_set_gamut_remap = dpp3_cm_set_gamut_remap,
+ .dpp_set_csc_adjustment = NULL,
+ .dpp_set_csc_default = NULL,
+ .dpp_program_regamma_pwl = NULL,
+ .dpp_set_pre_degam = dpp3_set_pre_degam,
+ .dpp_program_input_lut = NULL,
+ .dpp_full_bypass = dpp1_full_bypass,
+ .dpp_setup = dpp3_cnv_setup,
+ .dpp_program_degamma_pwl = NULL,
+ .dpp_program_cm_dealpha = dpp3_program_cm_dealpha,
+ .dpp_program_cm_bias = dpp3_program_cm_bias,
+#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
+ .dpp_program_blnd_lut = dpp3_program_blnd_lut,
+ .dpp_program_shaper_lut = dpp3_program_shaper,
+ .dpp_program_3dlut = dpp3_program_3dlut,
+#else
+ .dpp_program_blnd_lut = NULL,
+ .dpp_program_shaper_lut = NULL,
+ .dpp_program_3dlut = NULL,
+#endif
+
+ .dpp_program_bias_and_scale = NULL,
+ .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer,
+ .set_cursor_attributes = dpp3_set_cursor_attributes,
+ .set_cursor_position = dpp1_set_cursor_position,
+ .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
+ .dpp_dppclk_control = dpp1_dppclk_control,
+ .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
+};
+
+
+static struct dpp_caps dcn30_dpp_cap = {
+ .dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT,
+ .dscl_calc_lb_num_partitions = dscl2_calc_lb_num_partitions,
+};
+
+bool dpp3_construct(
+ struct dcn3_dpp *dpp,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn3_dpp_registers *tf_regs,
+ const struct dcn3_dpp_shift *tf_shift,
+ const struct dcn3_dpp_mask *tf_mask)
+{
+ dpp->base.ctx = ctx;
+
+ dpp->base.inst = inst;
+ dpp->base.funcs = &dcn30_dpp_funcs;
+ dpp->base.caps = &dcn30_dpp_cap;
+
+ dpp->tf_regs = tf_regs;
+ dpp->tf_shift = tf_shift;
+ dpp->tf_mask = tf_mask;
+
+ dpp->lb_pixel_depth_supported =
+ LB_PIXEL_DEPTH_18BPP |
+ LB_PIXEL_DEPTH_24BPP |
+ LB_PIXEL_DEPTH_30BPP;
+
+ dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY;
+ dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/
+
+ return true;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
new file mode 100644
index 000000000000..7f6bedbc1ff1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
@@ -0,0 +1,608 @@
+/* Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN30_DPP_H__
+#define __DCN30_DPP_H__
+
+#include "dcn20/dcn20_dpp.h"
+
+#define TO_DCN30_DPP(dpp)\
+ container_of(dpp, struct dcn3_dpp, base)
+
+#define DPP_REG_LIST_DCN30_COMMON(id)\
+ SRI(CM_DEALPHA, CM, id),\
+ SRI(CM_MEM_PWR_STATUS, CM, id),\
+ SRI(CM_BIAS_CR_R, CM, id),\
+ SRI(CM_BIAS_Y_G_CB_B, CM, id),\
+ SRI(PRE_DEGAM, CNVC_CFG, id),\
+ SRI(CM_GAMCOR_CONTROL, CM, id),\
+ SRI(CM_GAMCOR_LUT_CONTROL, CM, id),\
+ SRI(CM_GAMCOR_LUT_INDEX, CM, id),\
+ SRI(CM_GAMCOR_LUT_DATA, CM, id),\
+ SRI(CM_GAMCOR_RAMB_START_CNTL_B, CM, id),\
+ SRI(CM_GAMCOR_RAMB_START_CNTL_G, CM, id),\
+ SRI(CM_GAMCOR_RAMB_START_CNTL_R, CM, id),\
+ SRI(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B, CM, id),\
+ SRI(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G, CM, id),\
+ SRI(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R, CM, id),\
+ SRI(CM_GAMCOR_RAMB_END_CNTL1_B, CM, id),\
+ SRI(CM_GAMCOR_RAMB_END_CNTL2_B, CM, id),\
+ SRI(CM_GAMCOR_RAMB_END_CNTL1_G, CM, id),\
+ SRI(CM_GAMCOR_RAMB_END_CNTL2_G, CM, id),\
+ SRI(CM_GAMCOR_RAMB_END_CNTL1_R, CM, id),\
+ SRI(CM_GAMCOR_RAMB_END_CNTL2_R, CM, id),\
+ SRI(CM_GAMCOR_RAMB_REGION_0_1, CM, id),\
+ SRI(CM_GAMCOR_RAMB_REGION_32_33, CM, id),\
+ SRI(CM_GAMCOR_RAMB_OFFSET_B, CM, id),\
+ SRI(CM_GAMCOR_RAMB_OFFSET_G, CM, id),\
+ SRI(CM_GAMCOR_RAMB_OFFSET_R, CM, id),\
+ SRI(CM_GAMCOR_RAMB_START_BASE_CNTL_B, CM, id),\
+ SRI(CM_GAMCOR_RAMB_START_BASE_CNTL_G, CM, id),\
+ SRI(CM_GAMCOR_RAMB_START_BASE_CNTL_R, CM, id),\
+ SRI(CM_GAMCOR_RAMA_START_CNTL_B, CM, id),\
+ SRI(CM_GAMCOR_RAMA_START_CNTL_G, CM, id),\
+ SRI(CM_GAMCOR_RAMA_START_CNTL_R, CM, id),\
+ SRI(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B, CM, id),\
+ SRI(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G, CM, id),\
+ SRI(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R, CM, id),\
+ SRI(CM_GAMCOR_RAMA_END_CNTL1_B, CM, id),\
+ SRI(CM_GAMCOR_RAMA_END_CNTL2_B, CM, id),\
+ SRI(CM_GAMCOR_RAMA_END_CNTL1_G, CM, id),\
+ SRI(CM_GAMCOR_RAMA_END_CNTL2_G, CM, id),\
+ SRI(CM_GAMCOR_RAMA_END_CNTL1_R, CM, id),\
+ SRI(CM_GAMCOR_RAMA_END_CNTL2_R, CM, id),\
+ SRI(CM_GAMCOR_RAMA_REGION_0_1, CM, id),\
+ SRI(CM_GAMCOR_RAMA_REGION_32_33, CM, id),\
+ SRI(CM_GAMCOR_RAMA_OFFSET_B, CM, id),\
+ SRI(CM_GAMCOR_RAMA_OFFSET_G, CM, id),\
+ SRI(CM_GAMCOR_RAMA_OFFSET_R, CM, id),\
+ SRI(CM_GAMCOR_RAMA_START_BASE_CNTL_B, CM, id),\
+ SRI(CM_GAMCOR_RAMA_START_BASE_CNTL_G, CM, id),\
+ SRI(CM_GAMCOR_RAMA_START_BASE_CNTL_R, CM, id),\
+ SRI(CM_GAMUT_REMAP_CONTROL, CM, id),\
+ SRI(CM_GAMUT_REMAP_C11_C12, CM, id),\
+ SRI(CM_GAMUT_REMAP_C13_C14, CM, id),\
+ SRI(CM_GAMUT_REMAP_C21_C22, CM, id),\
+ SRI(CM_GAMUT_REMAP_C23_C24, CM, id),\
+ SRI(CM_GAMUT_REMAP_C31_C32, CM, id),\
+ SRI(CM_GAMUT_REMAP_C33_C34, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C11_C12, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C13_C14, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C21_C22, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C23_C24, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C31_C32, CM, id),\
+ SRI(CM_GAMUT_REMAP_B_C33_C34, CM, id),\
+ SRI(DSCL_EXT_OVERSCAN_LEFT_RIGHT, DSCL, id), \
+ SRI(DSCL_EXT_OVERSCAN_TOP_BOTTOM, DSCL, id), \
+ SRI(OTG_H_BLANK, DSCL, id), \
+ SRI(OTG_V_BLANK, DSCL, id), \
+ SRI(SCL_MODE, DSCL, id), \
+ SRI(LB_DATA_FORMAT, DSCL, id), \
+ SRI(LB_MEMORY_CTRL, DSCL, id), \
+ SRI(DSCL_AUTOCAL, DSCL, id), \
+ SRI(SCL_TAP_CONTROL, DSCL, id), \
+ SRI(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \
+ SRI(SCL_COEF_RAM_TAP_DATA, DSCL, id), \
+ SRI(DSCL_2TAP_CONTROL, DSCL, id), \
+ SRI(MPC_SIZE, DSCL, id), \
+ SRI(SCL_HORZ_FILTER_SCALE_RATIO, DSCL, id), \
+ SRI(SCL_VERT_FILTER_SCALE_RATIO, DSCL, id), \
+ SRI(SCL_HORZ_FILTER_SCALE_RATIO_C, DSCL, id), \
+ SRI(SCL_VERT_FILTER_SCALE_RATIO_C, DSCL, id), \
+ SRI(SCL_HORZ_FILTER_INIT, DSCL, id), \
+ SRI(SCL_HORZ_FILTER_INIT_C, DSCL, id), \
+ SRI(SCL_VERT_FILTER_INIT, DSCL, id), \
+ SRI(SCL_VERT_FILTER_INIT_C, DSCL, id), \
+ SRI(RECOUT_START, DSCL, id), \
+ SRI(RECOUT_SIZE, DSCL, id), \
+ SRI(PRE_DEALPHA, CNVC_CFG, id), \
+ SRI(PRE_REALPHA, CNVC_CFG, id), \
+ SRI(PRE_CSC_MODE, CNVC_CFG, id), \
+ SRI(PRE_CSC_C11_C12, CNVC_CFG, id), \
+ SRI(PRE_CSC_C33_C34, CNVC_CFG, id), \
+ SRI(PRE_CSC_B_C11_C12, CNVC_CFG, id), \
+ SRI(PRE_CSC_B_C33_C34, CNVC_CFG, id), \
+ SRI(CM_POST_CSC_CONTROL, CM, id), \
+ SRI(CM_POST_CSC_C11_C12, CM, id), \
+ SRI(CM_POST_CSC_C33_C34, CM, id), \
+ SRI(CM_POST_CSC_B_C11_C12, CM, id), \
+ SRI(CM_POST_CSC_B_C33_C34, CM, id), \
+ SRI(CM_MEM_PWR_CTRL, CM, id), \
+ SRI(CM_CONTROL, CM, id), \
+ SRI(FORMAT_CONTROL, CNVC_CFG, id), \
+ SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
+ SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
+ SRI(CURSOR0_COLOR0, CNVC_CUR, id), \
+ SRI(CURSOR0_COLOR1, CNVC_CUR, id), \
+ SRI(CURSOR0_FP_SCALE_BIAS, CNVC_CUR, id), \
+ SRI(DPP_CONTROL, DPP_TOP, id), \
+ SRI(CM_HDR_MULT_COEF, CM, id), \
+ SRI(CURSOR_CONTROL, CURSOR0_, id), \
+ SRI(ALPHA_2BIT_LUT, CNVC_CFG, id), \
+ SRI(FCNV_FP_BIAS_R, CNVC_CFG, id), \
+ SRI(FCNV_FP_BIAS_G, CNVC_CFG, id), \
+ SRI(FCNV_FP_BIAS_B, CNVC_CFG, id), \
+ SRI(FCNV_FP_SCALE_R, CNVC_CFG, id), \
+ SRI(FCNV_FP_SCALE_G, CNVC_CFG, id), \
+ SRI(FCNV_FP_SCALE_B, CNVC_CFG, id), \
+ SRI(COLOR_KEYER_CONTROL, CNVC_CFG, id), \
+ SRI(COLOR_KEYER_ALPHA, CNVC_CFG, id), \
+ SRI(COLOR_KEYER_RED, CNVC_CFG, id), \
+ SRI(COLOR_KEYER_GREEN, CNVC_CFG, id), \
+ SRI(COLOR_KEYER_BLUE, CNVC_CFG, id), \
+ SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\
+ SRI(DSCL_MEM_PWR_CTRL, DSCL, id)
+
+#define DPP_REG_LIST_DCN30(id)\
+ DPP_REG_LIST_DCN30_COMMON(id), \
+ TF_REG_LIST_DCN20_COMMON(id), \
+ SRI(CM_BLNDGAM_CONTROL, CM, id), \
+ SRI(CM_SHAPER_LUT_DATA, CM, id),\
+ SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B, CM, id),\
+ SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G, CM, id),\
+ SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R, CM, id),\
+ SRI(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B, CM, id),\
+ SRI(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G, CM, id),\
+ SRI(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R, CM, id),\
+ SRI(CM_BLNDGAM_LUT_CONTROL, CM, id)
+
+
+
+#define DPP_REG_LIST_SH_MASK_DCN30_COMMON(mask_sh)\
+ TF_SF(CM0_CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, mask_sh),\
+ TF_SF(CM0_CM_DEALPHA, CM_DEALPHA_EN, mask_sh),\
+ TF_SF(CM0_CM_DEALPHA, CM_DEALPHA_ABLND, mask_sh),\
+ TF_SF(CM0_CM_BIAS_CR_R, CM_BIAS_CR_R, mask_sh),\
+ TF_SF(CM0_CM_BIAS_Y_G_CB_B, CM_BIAS_Y_G, mask_sh),\
+ TF_SF(CM0_CM_BIAS_Y_G_CB_B, CM_BIAS_CB_B, mask_sh),\
+ TF_SF(CM0_CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_DIS, mask_sh),\
+ TF_SF(CM0_CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, mask_sh),\
+ TF_SF(CNVC_CFG0_PRE_DEGAM, PRE_DEGAM_MODE, mask_sh),\
+ TF_SF(CNVC_CFG0_PRE_DEGAM, PRE_DEGAM_SELECT, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_MODE, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_PWL_DISABLE, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_LUT_INDEX, CM_GAMCOR_LUT_INDEX, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_LUT_DATA, CM_GAMCOR_LUT_DATA, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_WRITE_COLOR_MASK, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_READ_COLOR_SEL, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_READ_DBG, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_HOST_SEL, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_CONFIG_MODE, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_RAMA_START_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_B, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_RAMA_START_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_RAMA_END_CNTL1_B, CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_RAMA_END_CNTL2_B, CM_GAMCOR_RAMA_EXP_REGION_END_B, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_RAMA_END_CNTL2_B, CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_RAMA_OFFSET_B, CM_GAMCOR_RAMA_OFFSET_B, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\
+ TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C11, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C12, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C13, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C14, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C21, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C22, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C23, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C24, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C31, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C32, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C33, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C34, mask_sh),\
+ TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh),\
+ TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT, mask_sh),\
+ TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM, mask_sh),\
+ TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP, mask_sh),\
+ TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_START, mask_sh),\
+ TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_END, mask_sh),\
+ TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_START, mask_sh),\
+ TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_END, mask_sh),\
+ TF_SF(DSCL0_LB_DATA_FORMAT, INTERLEAVE_EN, mask_sh),\
+ TF2_SF(DSCL0, LB_DATA_FORMAT__ALPHA_EN, mask_sh),\
+ TF_SF(DSCL0_LB_MEMORY_CTRL, MEMORY_CONFIG, mask_sh),\
+ TF_SF(DSCL0_LB_MEMORY_CTRL, LB_MAX_PARTITIONS, mask_sh),\
+ TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_MODE, mask_sh),\
+ TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_NUM_PIPE, mask_sh),\
+ TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_PIPE_ID, mask_sh),\
+ TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS, mask_sh),\
+ TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS, mask_sh),\
+ TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS_C, mask_sh),\
+ TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS_C, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_TAP_PAIR_IDX, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_PHASE, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_FILTER_TYPE, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF_EN, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_HARDCODE_COEF_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_FACTOR, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_HARDCODE_COEF_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_FACTOR, mask_sh),\
+ TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT, mask_sh),\
+ TF_SF(DSCL0_SCL_MODE, DSCL_MODE, mask_sh),\
+ TF_SF(DSCL0_RECOUT_START, RECOUT_START_X, mask_sh),\
+ TF_SF(DSCL0_RECOUT_START, RECOUT_START_Y, mask_sh),\
+ TF_SF(DSCL0_RECOUT_SIZE, RECOUT_WIDTH, mask_sh),\
+ TF_SF(DSCL0_RECOUT_SIZE, RECOUT_HEIGHT, mask_sh),\
+ TF_SF(DSCL0_MPC_SIZE, MPC_WIDTH, mask_sh),\
+ TF_SF(DSCL0_MPC_SIZE, MPC_HEIGHT, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C, SCL_H_SCALE_RATIO_C, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C, SCL_V_SCALE_RATIO_C, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_FRAC, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_INT, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_FRAC_C, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_INT_C, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_FRAC, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_INT, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_FRAC_C, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_INT_C, mask_sh),\
+ TF_SF(DSCL0_SCL_MODE, SCL_CHROMA_COEF_MODE, mask_sh),\
+ TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT_CURRENT, mask_sh), \
+ TF_SF(CNVC_CFG0_PRE_DEALPHA, PRE_DEALPHA_EN, mask_sh), \
+ TF_SF(CNVC_CFG0_PRE_DEALPHA, PRE_DEALPHA_ABLND_EN, mask_sh), \
+ TF_SF(CNVC_CFG0_PRE_REALPHA, PRE_REALPHA_EN, mask_sh), \
+ TF_SF(CNVC_CFG0_PRE_REALPHA, PRE_REALPHA_ABLND_EN, mask_sh), \
+ TF_SF(CNVC_CFG0_PRE_CSC_MODE, PRE_CSC_MODE, mask_sh), \
+ TF_SF(CNVC_CFG0_PRE_CSC_MODE, PRE_CSC_MODE_CURRENT, mask_sh), \
+ TF_SF(CNVC_CFG0_PRE_CSC_C11_C12, PRE_CSC_C11, mask_sh), \
+ TF_SF(CNVC_CFG0_PRE_CSC_C11_C12, PRE_CSC_C12, mask_sh), \
+ TF_SF(CNVC_CFG0_PRE_CSC_C33_C34, PRE_CSC_C33, mask_sh), \
+ TF_SF(CNVC_CFG0_PRE_CSC_C33_C34, PRE_CSC_C34, mask_sh), \
+ TF_SF(CM0_CM_POST_CSC_CONTROL, CM_POST_CSC_MODE, mask_sh), \
+ TF_SF(CM0_CM_POST_CSC_CONTROL, CM_POST_CSC_MODE_CURRENT, mask_sh), \
+ TF_SF(CM0_CM_POST_CSC_C11_C12, CM_POST_CSC_C11, mask_sh), \
+ TF_SF(CM0_CM_POST_CSC_C11_C12, CM_POST_CSC_C12, mask_sh), \
+ TF_SF(CM0_CM_POST_CSC_C33_C34, CM_POST_CSC_C33, mask_sh), \
+ TF_SF(CM0_CM_POST_CSC_C33_C34, CM_POST_CSC_C34, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \
+ TF2_SF(CNVC_CFG0, FORMAT_CONTROL__ALPHA_EN, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \
+ TF_SF(CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT, CNVC_SURFACE_PIXEL_FORMAT, mask_sh), \
+ TF_SF(CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT, CNVC_ALPHA_PLANE_ENABLE, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_MODE, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_EXPANSION_MODE, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, mask_sh), \
+ TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \
+ TF_SF(CM0_CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, mask_sh), \
+ TF_SF(CM0_CM_CONTROL, CM_BYPASS, mask_sh), \
+ TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
+ TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
+ TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
+ TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CNV16, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, CLAMP_POSITIVE, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, CLAMP_POSITIVE_C, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CROSSBAR_R, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CROSSBAR_G, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CROSSBAR_B, mask_sh), \
+ TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, mask_sh), \
+ TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, mask_sh), \
+ TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, mask_sh), \
+ TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, mask_sh), \
+ TF_SF(CNVC_CFG0_FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, mask_sh), \
+ TF_SF(CNVC_CFG0_FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, mask_sh), \
+ TF_SF(CNVC_CFG0_FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, mask_sh), \
+ TF_SF(CNVC_CFG0_FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, mask_sh), \
+ TF_SF(CNVC_CFG0_FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, mask_sh), \
+ TF_SF(CNVC_CFG0_FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, COLOR_KEYER_EN, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, COLOR_KEYER_MODE, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_LOW, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_HIGH, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_RED, COLOR_KEYER_RED_LOW, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_RED, COLOR_KEYER_RED_HIGH, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_LOW, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_HIGH, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_LOW, mask_sh), \
+ TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_HIGH, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIX_INV_MODE, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIXEL_ALPHA_MOD_EN, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ROM_EN, mask_sh),\
+ TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\
+ TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh)
+
+#define DPP_REG_LIST_SH_MASK_DCN30_UPDATED(mask_sh)\
+ TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE_CURRENT, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_HOST_SEL, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_CONFIG_MODE, mask_sh), \
+ TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE_CURRENT, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_MODE_CURRENT, mask_sh)
+
+
+#define DPP_REG_LIST_SH_MASK_DCN30(mask_sh)\
+ DPP_REG_LIST_SH_MASK_DCN30_COMMON(mask_sh), \
+ TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh), \
+ DPP_REG_LIST_SH_MASK_DCN30_UPDATED(mask_sh)
+
+#define DPP_REG_FIELD_LIST_DCN3(type) \
+ TF_REG_FIELD_LIST_DCN2_0(type); \
+ type FORMAT_CROSSBAR_R; \
+ type FORMAT_CROSSBAR_G; \
+ type FORMAT_CROSSBAR_B; \
+ type CM_DEALPHA_EN;\
+ type CM_DEALPHA_ABLND;\
+ type CM_BIAS_Y_G;\
+ type CM_BIAS_CB_B;\
+ type CM_BIAS_CR_R;\
+ type GAMCOR_MEM_PWR_DIS; \
+ type GAMCOR_MEM_PWR_FORCE; \
+ type PRE_DEGAM_MODE;\
+ type PRE_DEGAM_SELECT;\
+ type CNVC_ALPHA_PLANE_ENABLE; \
+ type PRE_DEALPHA_EN; \
+ type PRE_DEALPHA_ABLND_EN; \
+ type PRE_REALPHA_EN; \
+ type PRE_REALPHA_ABLND_EN; \
+ type PRE_CSC_MODE; \
+ type PRE_CSC_MODE_CURRENT; \
+ type PRE_CSC_C11; \
+ type PRE_CSC_C12; \
+ type PRE_CSC_C33; \
+ type PRE_CSC_C34; \
+ type CM_POST_CSC_MODE; \
+ type CM_POST_CSC_MODE_CURRENT; \
+ type CM_POST_CSC_C11; \
+ type CM_POST_CSC_C12; \
+ type CM_POST_CSC_C33; \
+ type CM_POST_CSC_C34; \
+ type CM_GAMCOR_MODE; \
+ type CM_GAMCOR_SELECT; \
+ type CM_GAMCOR_PWL_DISABLE; \
+ type CM_GAMCOR_MODE_CURRENT; \
+ type CM_GAMCOR_SELECT_CURRENT; \
+ type CM_GAMCOR_LUT_INDEX; \
+ type CM_GAMCOR_LUT_DATA; \
+ type CM_GAMCOR_LUT_WRITE_COLOR_MASK; \
+ type CM_GAMCOR_LUT_READ_COLOR_SEL; \
+ type CM_GAMCOR_LUT_READ_DBG; \
+ type CM_GAMCOR_LUT_HOST_SEL; \
+ type CM_GAMCOR_LUT_CONFIG_MODE; \
+ type CM_GAMCOR_LUT_STATUS; \
+ type CM_GAMCOR_RAMA_EXP_REGION_START_B; \
+ type CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B; \
+ type CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B; \
+ type CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B; \
+ type CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B; \
+ type CM_GAMCOR_RAMA_EXP_REGION_END_B; \
+ type CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B; \
+ type CM_GAMCOR_RAMA_OFFSET_B; \
+ type CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET; \
+ type CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET; \
+ type CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;\
+ type CM_GAMUT_REMAP_MODE_CURRENT;\
+ type CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_B; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_G; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_R; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_G; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_R; \
+ type CM_BLNDGAM_LUT_WRITE_COLOR_MASK; \
+ type CM_BLNDGAM_LUT_HOST_SEL; \
+ type CM_BLNDGAM_LUT_CONFIG_MODE; \
+ type CM_3DLUT_MODE_CURRENT; \
+ type CM_SHAPER_MODE_CURRENT; \
+ type CM_BLNDGAM_MODE; \
+ type CM_BLNDGAM_MODE_CURRENT; \
+ type CM_BLNDGAM_SELECT_CURRENT; \
+ type CM_BLNDGAM_SELECT; \
+ type GAMCOR_MEM_PWR_STATE
+
+struct dcn3_dpp_shift {
+ DPP_REG_FIELD_LIST_DCN3(uint8_t);
+};
+
+struct dcn3_dpp_mask {
+ DPP_REG_FIELD_LIST_DCN3(uint32_t);
+};
+
+#define DPP_DCN3_REG_VARIABLE_LIST_COMMON \
+ DPP_DCN2_REG_VARIABLE_LIST; \
+ uint32_t CM_MEM_PWR_STATUS;\
+ uint32_t CM_DEALPHA;\
+ uint32_t CM_BIAS_CR_R;\
+ uint32_t CM_BIAS_Y_G_CB_B;\
+ uint32_t PRE_DEGAM;\
+ uint32_t PRE_DEALPHA; \
+ uint32_t PRE_REALPHA; \
+ uint32_t PRE_CSC_MODE; \
+ uint32_t PRE_CSC_C11_C12; \
+ uint32_t PRE_CSC_C33_C34; \
+ uint32_t PRE_CSC_B_C11_C12; \
+ uint32_t PRE_CSC_B_C33_C34; \
+ uint32_t CM_POST_CSC_CONTROL; \
+ uint32_t CM_POST_CSC_C11_C12; \
+ uint32_t CM_POST_CSC_C33_C34; \
+ uint32_t CM_POST_CSC_B_C11_C12; \
+ uint32_t CM_POST_CSC_B_C33_C34; \
+ uint32_t CM_GAMUT_REMAP_B_C11_C12; \
+ uint32_t CM_GAMUT_REMAP_B_C13_C14; \
+ uint32_t CM_GAMUT_REMAP_B_C21_C22; \
+ uint32_t CM_GAMUT_REMAP_B_C23_C24; \
+ uint32_t CM_GAMUT_REMAP_B_C31_C32; \
+ uint32_t CM_GAMUT_REMAP_B_C33_C34; \
+ uint32_t CM_GAMCOR_CONTROL; \
+ uint32_t CM_GAMCOR_LUT_CONTROL; \
+ uint32_t CM_GAMCOR_LUT_INDEX; \
+ uint32_t CM_GAMCOR_LUT_DATA; \
+ uint32_t CM_GAMCOR_RAMB_START_CNTL_B; \
+ uint32_t CM_GAMCOR_RAMB_START_CNTL_G; \
+ uint32_t CM_GAMCOR_RAMB_START_CNTL_R; \
+ uint32_t CM_GAMCOR_RAMB_START_SLOPE_CNTL_B; \
+ uint32_t CM_GAMCOR_RAMB_START_SLOPE_CNTL_G; \
+ uint32_t CM_GAMCOR_RAMB_START_SLOPE_CNTL_R; \
+ uint32_t CM_GAMCOR_RAMB_END_CNTL1_B; \
+ uint32_t CM_GAMCOR_RAMB_END_CNTL2_B; \
+ uint32_t CM_GAMCOR_RAMB_END_CNTL1_G; \
+ uint32_t CM_GAMCOR_RAMB_END_CNTL2_G; \
+ uint32_t CM_GAMCOR_RAMB_END_CNTL1_R; \
+ uint32_t CM_GAMCOR_RAMB_END_CNTL2_R; \
+ uint32_t CM_GAMCOR_RAMB_REGION_0_1; \
+ uint32_t CM_GAMCOR_RAMB_REGION_32_33; \
+ uint32_t CM_GAMCOR_RAMB_OFFSET_B; \
+ uint32_t CM_GAMCOR_RAMB_OFFSET_G; \
+ uint32_t CM_GAMCOR_RAMB_OFFSET_R; \
+ uint32_t CM_GAMCOR_RAMB_START_BASE_CNTL_B; \
+ uint32_t CM_GAMCOR_RAMB_START_BASE_CNTL_G; \
+ uint32_t CM_GAMCOR_RAMB_START_BASE_CNTL_R; \
+ uint32_t CM_GAMCOR_RAMA_START_CNTL_B; \
+ uint32_t CM_GAMCOR_RAMA_START_CNTL_G; \
+ uint32_t CM_GAMCOR_RAMA_START_CNTL_R; \
+ uint32_t CM_GAMCOR_RAMA_START_SLOPE_CNTL_B; \
+ uint32_t CM_GAMCOR_RAMA_START_SLOPE_CNTL_G; \
+ uint32_t CM_GAMCOR_RAMA_START_SLOPE_CNTL_R; \
+ uint32_t CM_GAMCOR_RAMA_END_CNTL1_B; \
+ uint32_t CM_GAMCOR_RAMA_END_CNTL2_B; \
+ uint32_t CM_GAMCOR_RAMA_END_CNTL1_G; \
+ uint32_t CM_GAMCOR_RAMA_END_CNTL2_G; \
+ uint32_t CM_GAMCOR_RAMA_END_CNTL1_R; \
+ uint32_t CM_GAMCOR_RAMA_END_CNTL2_R; \
+ uint32_t CM_GAMCOR_RAMA_REGION_0_1; \
+ uint32_t CM_GAMCOR_RAMA_REGION_32_33; \
+ uint32_t CM_GAMCOR_RAMA_OFFSET_B; \
+ uint32_t CM_GAMCOR_RAMA_OFFSET_G; \
+ uint32_t CM_GAMCOR_RAMA_OFFSET_R; \
+ uint32_t CM_GAMCOR_RAMA_START_BASE_CNTL_B; \
+ uint32_t CM_GAMCOR_RAMA_START_BASE_CNTL_G; \
+ uint32_t CM_GAMCOR_RAMA_START_BASE_CNTL_R; \
+ uint32_t CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B; \
+ uint32_t CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G; \
+ uint32_t CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R; \
+ uint32_t CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B; \
+ uint32_t CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G; \
+ uint32_t CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R; \
+ uint32_t CM_BLNDGAM_LUT_CONTROL
+
+
+struct dcn3_dpp_registers {
+ DPP_DCN3_REG_VARIABLE_LIST_COMMON;
+};
+
+
+struct dcn3_dpp {
+ struct dpp base;
+
+ const struct dcn3_dpp_registers *tf_regs;
+ const struct dcn3_dpp_shift *tf_shift;
+ const struct dcn3_dpp_mask *tf_mask;
+
+ const uint16_t *filter_v;
+ const uint16_t *filter_h;
+ const uint16_t *filter_v_c;
+ const uint16_t *filter_h_c;
+ int lb_pixel_depth_supported;
+ int lb_memory_size;
+ int lb_bits_per_entry;
+ bool is_write_to_ram_a_safe;
+ struct scaler_data scl_data;
+ struct pwl_params pwl_data;
+};
+
+bool dpp3_construct(struct dcn3_dpp *dpp3,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn3_dpp_registers *tf_regs,
+ const struct dcn3_dpp_shift *tf_shift,
+ const struct dcn3_dpp_mask *tf_mask);
+
+bool dpp3_program_gamcor_lut(
+ struct dpp *dpp_base, const struct pwl_params *params);
+
+void dpp3_program_CM_dealpha(
+ struct dpp *dpp_base,
+ uint32_t enable, uint32_t additive_blending);
+
+void dpp3_program_CM_bias(
+ struct dpp *dpp_base,
+ struct CM_bias_params *bias_params);
+
+void dpp3_set_hdr_multiplier(
+ struct dpp *dpp_base,
+ uint32_t multiplier);
+
+void dpp3_cm_set_gamut_remap(
+ struct dpp *dpp_base,
+ const struct dpp_grph_csc_adjustment *adjust);
+
+void dpp3_set_pre_degam(struct dpp *dpp_base,
+ uint32_t degamma_lut_selection);
+
+void dpp3_set_cursor_attributes(
+ struct dpp *dpp_base,
+ struct dc_cursor_attributes *cursor_attributes);
+
+void dpp3_program_post_csc(
+ struct dpp *dpp_base,
+ enum dc_color_space color_space,
+ enum dcn10_input_csc_select input_select,
+ const struct out_csc_color_matrix *tbl_entry);
+
+void dpp3_program_cm_bias(
+ struct dpp *dpp_base,
+ struct CM_bias_params *bias_params);
+
+void dpp3_program_cm_dealpha(
+ struct dpp *dpp_base,
+ uint32_t enable, uint32_t additive_blending);
+
+#endif /* __DC_HWSS_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
new file mode 100644
index 000000000000..9ab63c72f21c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
@@ -0,0 +1,410 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "core_types.h"
+#include "reg_helper.h"
+#include "dcn30_dpp.h"
+#include "basics/conversion.h"
+#include "dcn30_cm_common.h"
+
+#define REG(reg)\
+ dpp->tf_regs->reg
+
+#define CTX \
+ dpp->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dpp->tf_shift->field_name, dpp->tf_mask->field_name
+
+static void dpp3_enable_cm_block(
+ struct dpp *dpp_base)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ unsigned int cm_bypass_mode = 0;
+
+ // debug option: put CM in bypass mode
+ if (dpp_base->ctx->dc->debug.cm_in_bypass)
+ cm_bypass_mode = 1;
+
+ REG_UPDATE(CM_CONTROL, CM_BYPASS, cm_bypass_mode);
+}
+
+static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base)
+{
+	enum dc_lut_mode mode = LUT_BYPASS;
+ uint32_t state_mode;
+ uint32_t lut_mode;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_GET(CM_GAMCOR_CONTROL,
+ CM_GAMCOR_MODE_CURRENT, &state_mode);
+
+ if (state_mode == 0)
+ mode = LUT_BYPASS;
+
+ if (state_mode == 2) {//Programmable RAM LUT
+ REG_GET(CM_GAMCOR_CONTROL,
+ CM_GAMCOR_SELECT_CURRENT, &lut_mode);
+
+ if (lut_mode == 0)
+ mode = LUT_RAM_A;
+ else
+ mode = LUT_RAM_B;
+ }
+
+ return mode;
+}
+
+static void dpp3_program_gammcor_lut(
+ struct dpp *dpp_base,
+ const struct pwl_result_data *rgb,
+ uint32_t num,
+ bool is_ram_a)
+{
+ uint32_t i;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg;
+ uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg;
+ uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg;
+
+ /*fill in the LUT with all base values to be used by pwl module
+ * HW auto increments the LUT index: back-to-back write
+ */
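+	/* CM_GAMCOR_LUT_WRITE_COLOR_MASK selects which channels latch the written
+	 * data: bit2 = R, bit1 = G, bit0 = B.  With identical R/G/B curves a single
+	 * pass (all channels enabled in dpp3_configure_gamcor_lut) is sufficient.
+	 */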
+ if (is_rgb_equal(rgb, num)) {
+ for (i = 0 ; i < num; i++)
+ REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg);
+
+ REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red);
+
+ } else {
+ REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
+ CM_GAMCOR_LUT_WRITE_COLOR_MASK, 4);
+ for (i = 0 ; i < num; i++)
+ REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg);
+
+ REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red);
+
+ REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0);
+
+ REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
+ CM_GAMCOR_LUT_WRITE_COLOR_MASK, 2);
+ for (i = 0 ; i < num; i++)
+ REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].green_reg);
+
+ REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_green);
+
+ REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0);
+
+ REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
+ CM_GAMCOR_LUT_WRITE_COLOR_MASK, 1);
+ for (i = 0 ; i < num; i++)
+ REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].blue_reg);
+
+ REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_blue);
+ }
+}
+
+static void dpp3_power_on_gamcor_lut(
+ struct dpp *dpp_base,
+ bool power_on)
+{
+ uint32_t power_status;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+
+	REG_SET(CM_MEM_PWR_CTRL, 0,
+		GAMCOR_MEM_PWR_DIS, power_on ? 0 : 1);
+
+	REG_GET(CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, &power_status);
+	if (power_status != 0)
+		BREAK_TO_DEBUGGER();
+}
+
+void dpp3_program_cm_dealpha(
+ struct dpp *dpp_base,
+ uint32_t enable, uint32_t additive_blending)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_SET_2(CM_DEALPHA, 0,
+ CM_DEALPHA_EN, enable,
+ CM_DEALPHA_ABLND, additive_blending);
+}
+
+void dpp3_program_cm_bias(
+ struct dpp *dpp_base,
+ struct CM_bias_params *bias_params)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_SET(CM_BIAS_CR_R, 0, CM_BIAS_CR_R, bias_params->cm_bias_cr_r);
+ REG_SET_2(CM_BIAS_Y_G_CB_B, 0,
+ CM_BIAS_Y_G, bias_params->cm_bias_y_g,
+ CM_BIAS_CB_B, bias_params->cm_bias_cb_b);
+}
+
+static void dpp3_gamcor_reg_field(
+ struct dcn3_dpp *dpp,
+ struct dcn3_xfer_func_reg *reg)
+{
+
+ reg->shifts.field_region_start_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B;
+ reg->masks.field_region_start_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B;
+ reg->shifts.field_offset = dpp->tf_shift->CM_GAMCOR_RAMA_OFFSET_B;
+ reg->masks.field_offset = dpp->tf_mask->CM_GAMCOR_RAMA_OFFSET_B;
+
+ reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;
+ reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;
+
+ reg->shifts.field_region_end = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_B;
+ reg->masks.field_region_end = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_B;
+ reg->shifts.field_region_end_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B;
+ reg->masks.field_region_end_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B;
+ reg->shifts.field_region_end_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B;
+ reg->masks.field_region_end_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B;
+ reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B;
+ reg->masks.field_region_linear_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B;
+ reg->shifts.exp_region_start = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_B;
+ reg->masks.exp_region_start = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_B;
+ reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B;
+ reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B;
+}
+
+static void dpp3_configure_gamcor_lut(
+ struct dpp *dpp_base,
+ bool is_ram_a)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
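+	/* Enable writes to all three channels, point host access at the selected
+	 * RAM, and reset the LUT index before programming.
+	 */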
+ REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
+ CM_GAMCOR_LUT_WRITE_COLOR_MASK, 7);
+ REG_UPDATE(CM_GAMCOR_LUT_CONTROL,
+		CM_GAMCOR_LUT_HOST_SEL, is_ram_a ? 0 : 1);
+ REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0);
+}
+
+
+bool dpp3_program_gamcor_lut(
+ struct dpp *dpp_base, const struct pwl_params *params)
+{
+ enum dc_lut_mode current_mode;
+ enum dc_lut_mode next_mode;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ struct dcn3_xfer_func_reg gam_regs;
+
+ dpp3_enable_cm_block(dpp_base);
+
+ if (params == NULL) { //bypass if we have no pwl data
+ REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 0);
+ return false;
+ }
+ dpp3_power_on_gamcor_lut(dpp_base, true);
+ REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 2);
+
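+	/* Ping-pong between LUT RAM A and RAM B: program the RAM that is not
+	 * currently in use, then switch to it for the next frame.
+	 */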
+ current_mode = dpp30_get_gamcor_current(dpp_base);
+ if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
+ next_mode = LUT_RAM_B;
+ else
+ next_mode = LUT_RAM_A;
+
+ dpp3_power_on_gamcor_lut(dpp_base, true);
+	dpp3_configure_gamcor_lut(dpp_base, next_mode == LUT_RAM_A);
+
+ if (next_mode == LUT_RAM_B) {
+ gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMB_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMB_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMB_START_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMB_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMB_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMB_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMB_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMB_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMB_END_CNTL2_R);
+ gam_regs.region_start = REG(CM_GAMCOR_RAMB_REGION_0_1);
+ gam_regs.region_end = REG(CM_GAMCOR_RAMB_REGION_32_33);
+ //New registers in DCN3AG/DCN GAMCOR block
+ gam_regs.offset_b = REG(CM_GAMCOR_RAMB_OFFSET_B);
+ gam_regs.offset_g = REG(CM_GAMCOR_RAMB_OFFSET_G);
+ gam_regs.offset_r = REG(CM_GAMCOR_RAMB_OFFSET_R);
+ gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_B);
+ gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_G);
+ gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_R);
+ } else {
+ gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMA_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMA_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMA_START_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMA_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMA_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMA_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMA_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMA_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMA_END_CNTL2_R);
+ gam_regs.region_start = REG(CM_GAMCOR_RAMA_REGION_0_1);
+ gam_regs.region_end = REG(CM_GAMCOR_RAMA_REGION_32_33);
+ //New registers in DCN3AG/DCN GAMCOR block
+ gam_regs.offset_b = REG(CM_GAMCOR_RAMA_OFFSET_B);
+ gam_regs.offset_g = REG(CM_GAMCOR_RAMA_OFFSET_G);
+ gam_regs.offset_r = REG(CM_GAMCOR_RAMA_OFFSET_R);
+ gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_B);
+ gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_G);
+ gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_R);
+ }
+
+ //get register fields
+ dpp3_gamcor_reg_field(dpp, &gam_regs);
+
+ //program register set for LUTA/LUTB
+ cm_helper_program_gamcor_xfer_func(dpp_base->ctx, params, &gam_regs);
+
+	dpp3_program_gammcor_lut(dpp_base, params->rgb_resulted, params->hw_points_num,
+			next_mode == LUT_RAM_A);
+
+ //select Gamma LUT to use for next frame
+ REG_UPDATE(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT, next_mode == LUT_RAM_A ? 0:1);
+
+ return true;
+}
+
+void dpp3_set_hdr_multiplier(
+ struct dpp *dpp_base,
+ uint32_t multiplier)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier);
+}
+
+
+static void program_gamut_remap(
+ struct dcn3_dpp *dpp,
+ const uint16_t *regval,
+ int select)
+{
+ uint16_t selection = 0;
+ struct color_matrices_reg gam_regs;
+
+ if (regval == NULL || select == GAMUT_REMAP_BYPASS) {
+ REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, 0);
+ return;
+ }
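+	/* CM_GAMUT_REMAP_MODE: 0 = bypass, 1 = coefficient set A, 2 = coefficient set B */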
+ switch (select) {
+ case GAMUT_REMAP_COEFF:
+ selection = 1;
+ break;
+	/* This corresponds to GAMUT_REMAP coefficient set B;
+	 * there are no common coefficient sets in dcn3ag/dcn3.
+	 */
+ case GAMUT_REMAP_COMA_COEFF:
+ selection = 2;
+ break;
+ default:
+ break;
+ }
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+
+ if (select == GAMUT_REMAP_COEFF) {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else if (select == GAMUT_REMAP_COMA_COEFF) {
+
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ }
+ //select coefficient set to use
+ REG_SET(
+ CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, selection);
+}
+
+void dpp3_cm_set_gamut_remap(
+ struct dpp *dpp_base,
+ const struct dpp_grph_csc_adjustment *adjust)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+ int i = 0;
+ int gamut_mode;
+
+	if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) {
+		/* Bypass if type is bypass or hw */
+		program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS);
+	} else {
+ struct fixed31_32 arr_matrix[12];
+ uint16_t arr_reg_val[12];
+
+ for (i = 0; i < 12; i++)
+ arr_matrix[i] = adjust->temperature_matrix[i];
+
+ convert_float_matrix(
+ arr_reg_val, arr_matrix, 12);
+
+ //current coefficient set in use
+ REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &gamut_mode);
+
+ if (gamut_mode == 0)
+ gamut_mode = 1; //use coefficient set A
+ else if (gamut_mode == 1)
+ gamut_mode = 2;
+ else
+ gamut_mode = 1;
+
+		/* Follow the dcn2 approach for now and program only coefficient set A
+		 * (gamut_mode, the A/B toggle computed above, is not used yet).
+		 */
+ program_gamut_remap(dpp, arr_reg_val, GAMUT_REMAP_COEFF);
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
new file mode 100644
index 000000000000..f14f69616692
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "reg_helper.h"
+#include "resource.h"
+#include "dwb.h"
+#include "dcn30_dwb.h"
+
+
+#define REG(reg)\
+ dwbc30->dwbc_regs->reg
+
+#define CTX \
+ dwbc30->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dwbc30->dwbc_shift->field_name, dwbc30->dwbc_mask->field_name
+
+#define DC_LOGGER \
+ dwbc30->base.ctx->logger
+
+static bool dwb3_get_caps(struct dwbc *dwbc, struct dwb_caps *caps)
+{
+ if (caps) {
+ caps->adapter_id = 0; /* we only support 1 adapter currently */
+ caps->hw_version = DCN_VERSION_3_0;
+ caps->num_pipes = 2;
+ memset(&caps->reserved, 0, sizeof(caps->reserved));
+ memset(&caps->reserved2, 0, sizeof(caps->reserved2));
+ caps->sw_version = dwb_ver_2_0;
+ caps->caps.support_dwb = true;
+ caps->caps.support_ogam = true;
+ caps->caps.support_wbscl = true;
+ caps->caps.support_ocsc = false;
+ caps->caps.support_stereo = true;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+void dwb3_config_fc(struct dwbc *dwbc, struct dc_dwb_params *params)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+
+ /* Set DWB source size */
+ REG_UPDATE_2(FC_SOURCE_SIZE, FC_SOURCE_WIDTH, params->cnv_params.src_width,
+ FC_SOURCE_HEIGHT, params->cnv_params.src_height);
+
+	/* When the crop window differs from the source size, enable and program cropping. */
+ if (params->cnv_params.crop_en) {
+ REG_UPDATE(FC_MODE_CTRL, FC_WINDOW_CROP_EN, 1);
+ REG_UPDATE(FC_WINDOW_START, FC_WINDOW_START_X, params->cnv_params.crop_x);
+ REG_UPDATE(FC_WINDOW_START, FC_WINDOW_START_Y, params->cnv_params.crop_y);
+ REG_UPDATE(FC_WINDOW_SIZE, FC_WINDOW_WIDTH, params->cnv_params.crop_width);
+ REG_UPDATE(FC_WINDOW_SIZE, FC_WINDOW_HEIGHT, params->cnv_params.crop_height);
+ } else {
+ REG_UPDATE(FC_MODE_CTRL, FC_WINDOW_CROP_EN, 0);
+ }
+
+ /* Set CAPTURE_RATE */
+ REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_RATE, params->capture_rate);
+
+ dwb3_set_stereo(dwbc, &params->stereo_params);
+}
+
+bool dwb3_enable(struct dwbc *dwbc, struct dc_dwb_params *params)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+ DC_LOG_DWB("%s dwb3_enabled at inst = %d", __func__, dwbc->inst);
+
+ /* Set WB_ENABLE (not double buffered; capture not enabled) */
+ REG_UPDATE(DWB_ENABLE_CLK_CTRL, DWB_ENABLE, 1);
+
+ /* Set FC parameters */
+ dwb3_config_fc(dwbc, params);
+
+ /* Program color processing unit */
+ dwb3_program_hdr_mult(dwbc, params);
+ dwb3_set_gamut_remap(dwbc, params);
+ dwb3_ogam_set_input_transfer_func(dwbc, params->out_transfer_func);
+
+ /* Program output denorm */
+ dwb3_set_denorm(dwbc, params);
+
+ /* Enable DWB capture enable (double buffered) */
+ REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, DWB_FRAME_CAPTURE_ENABLE);
+
+ /* First pixel count */
+ REG_UPDATE(FC_FLOW_CTRL, FC_FIRST_PIXEL_DELAY_COUNT, 96);
+
+ return true;
+}
+
+bool dwb3_disable(struct dwbc *dwbc)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+
+ /* disable FC */
+ REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, DWB_FRAME_CAPTURE_DISABLE);
+
+ /* disable WB */
+ REG_UPDATE(DWB_ENABLE_CLK_CTRL, DWB_ENABLE, 0);
+
+ DC_LOG_DWB("%s dwb3_disabled at inst = %d", __func__, dwbc->inst);
+ return true;
+}
+
+bool dwb3_update(struct dwbc *dwbc, struct dc_dwb_params *params)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+ unsigned int pre_locked;
+
+ /*
+ * Check if the caller has already locked DWB registers.
+ * If so: assume the caller will unlock, so don't touch the lock.
+ * If not: lock them for this update, then unlock after the
+ * update is complete.
+ */
+ REG_GET(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, &pre_locked);
+ DC_LOG_DWB("%s dwb update, inst = %d", __func__, dwbc->inst);
+
+ if (pre_locked == 0) {
+ /* Lock DWB registers */
+ REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 1);
+ }
+
+ /* Set FC parameters */
+ dwb3_config_fc(dwbc, params);
+
+ /* Program color processing unit */
+ dwb3_program_hdr_mult(dwbc, params);
+ dwb3_set_gamut_remap(dwbc, params);
+ dwb3_ogam_set_input_transfer_func(dwbc, params->out_transfer_func);
+
+ /* Program output denorm */
+ dwb3_set_denorm(dwbc, params);
+
+ if (pre_locked == 0) {
+ /* Unlock DWB registers */
+ REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 0);
+ }
+
+ return true;
+}
+
+bool dwb3_is_enabled(struct dwbc *dwbc)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+ unsigned int dwb_enabled = 0;
+ unsigned int fc_frame_capture_en = 0;
+
+ REG_GET(DWB_ENABLE_CLK_CTRL, DWB_ENABLE, &dwb_enabled);
+ REG_GET(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, &fc_frame_capture_en);
+
+ return ((dwb_enabled != 0) && (fc_frame_capture_en != 0));
+}
+
+void dwb3_set_stereo(struct dwbc *dwbc,
+ struct dwb_stereo_params *stereo_params)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+
+ if (stereo_params->stereo_enabled) {
+ REG_UPDATE(FC_MODE_CTRL, FC_EYE_SELECTION, stereo_params->stereo_eye_select);
+ REG_UPDATE(FC_MODE_CTRL, FC_STEREO_EYE_POLARITY, stereo_params->stereo_polarity);
+ DC_LOG_DWB("%s dwb stereo enabled", __func__);
+ } else {
+ REG_UPDATE(FC_MODE_CTRL, FC_EYE_SELECTION, 0);
+ DC_LOG_DWB("%s dwb stereo disabled", __func__);
+ }
+}
+
+void dwb3_set_new_content(struct dwbc *dwbc,
+ bool is_new_content)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+
+ REG_UPDATE(FC_MODE_CTRL, FC_NEW_CONTENT, is_new_content);
+}
+
+void dwb3_set_denorm(struct dwbc *dwbc, struct dc_dwb_params *params)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+
+ /* Set output format*/
+ REG_UPDATE(DWB_OUT_CTRL, OUT_FORMAT, params->cnv_params.fc_out_format);
+
+	/* Set output denorm and clamping; these apply only to the fixed-point 32bpp ARGB/RGBA formats */
+ if (params->cnv_params.fc_out_format == DWB_OUT_FORMAT_32BPP_ARGB ||
+ params->cnv_params.fc_out_format == DWB_OUT_FORMAT_32BPP_RGBA) {
+ REG_UPDATE(DWB_OUT_CTRL, OUT_DENORM, params->cnv_params.out_denorm_mode);
+ REG_UPDATE(DWB_OUT_CTRL, OUT_MAX, params->cnv_params.out_max_pix_val);
+ REG_UPDATE(DWB_OUT_CTRL, OUT_MIN, params->cnv_params.out_min_pix_val);
+ }
+}
+
+
+const struct dwbc_funcs dcn30_dwbc_funcs = {
+ .get_caps = dwb3_get_caps,
+ .enable = dwb3_enable,
+ .disable = dwb3_disable,
+ .update = dwb3_update,
+ .is_enabled = dwb3_is_enabled,
+ .set_stereo = dwb3_set_stereo,
+ .set_new_content = dwb3_set_new_content,
+ .dwb_program_output_csc = NULL,
+ .dwb_ogam_set_input_transfer_func = dwb3_ogam_set_input_transfer_func, //TODO: rename
+ .dwb_set_scaler = NULL,
+};
+
+void dcn30_dwbc_construct(struct dcn30_dwbc *dwbc30,
+ struct dc_context *ctx,
+ const struct dcn30_dwbc_registers *dwbc_regs,
+ const struct dcn30_dwbc_shift *dwbc_shift,
+ const struct dcn30_dwbc_mask *dwbc_mask,
+ int inst)
+{
+ dwbc30->base.ctx = ctx;
+
+ dwbc30->base.inst = inst;
+ dwbc30->base.funcs = &dcn30_dwbc_funcs;
+
+ dwbc30->dwbc_regs = dwbc_regs;
+ dwbc30->dwbc_shift = dwbc_shift;
+ dwbc30->dwbc_mask = dwbc_mask;
+}
+
+void dwb3_set_host_read_rate_control(struct dwbc *dwbc, bool host_read_delay)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+
+	/*
+	 * Set the maximum delay for host read access to the DWBSCL LUT or OGAM LUT
+	 * when there are no idle cycles in the HW pipeline (in units of 4 clock cycles).
+	 */
+ REG_UPDATE(DWB_HOST_READ_CONTROL, DWB_HOST_READ_RATE_CONTROL, host_read_delay);
+
+ DC_LOG_DWB("%s dwb3_rate_control at inst = %d", __func__, dwbc->inst);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
new file mode 100644
index 000000000000..1010930cf071
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
@@ -0,0 +1,923 @@
+/* Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DC_DWBC_DCN30_H__
+#define __DC_DWBC_DCN30_H__
+
+#define TO_DCN30_DWBC(dwbc_base) \
+ container_of(dwbc_base, struct dcn30_dwbc, base)
+
+/* DCN */
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define SF_DWB(reg_name, block, id, field_name, post_fix)\
+ .field_name = block ## id ## _ ## reg_name ## __ ## field_name ## post_fix
+
+	/* set field name: SF_DWB2 builds the define from the register name alone, without the block/instance prefix used by SF_DWB */
+#define SF_DWB2(reg_name, block, id, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+
+#define DWBC_COMMON_REG_LIST_DCN30(inst) \
+ SR(DWB_ENABLE_CLK_CTRL),\
+ SR(DWB_MEM_PWR_CTRL),\
+ SR(FC_MODE_CTRL),\
+ SR(FC_FLOW_CTRL),\
+ SR(FC_WINDOW_START),\
+ SR(FC_WINDOW_SIZE),\
+ SR(FC_SOURCE_SIZE),\
+ SR(DWB_UPDATE_CTRL),\
+ SR(DWB_CRC_CTRL),\
+ SR(DWB_CRC_MASK_R_G),\
+ SR(DWB_CRC_MASK_B_A),\
+ SR(DWB_CRC_VAL_R_G),\
+ SR(DWB_CRC_VAL_B_A),\
+ SR(DWB_OUT_CTRL),\
+ SR(DWB_MMHUBBUB_BACKPRESSURE_CNT_EN),\
+ SR(DWB_MMHUBBUB_BACKPRESSURE_CNT),\
+ SR(DWB_HOST_READ_CONTROL),\
+ SR(DWB_SOFT_RESET),\
+ SR(DWB_HDR_MULT_COEF),\
+ SR(DWB_GAMUT_REMAP_MODE),\
+ SR(DWB_GAMUT_REMAP_COEF_FORMAT),\
+ SR(DWB_GAMUT_REMAPA_C11_C12),\
+ SR(DWB_GAMUT_REMAPA_C13_C14),\
+ SR(DWB_GAMUT_REMAPA_C21_C22),\
+ SR(DWB_GAMUT_REMAPA_C23_C24),\
+ SR(DWB_GAMUT_REMAPA_C31_C32),\
+ SR(DWB_GAMUT_REMAPA_C33_C34),\
+ SR(DWB_GAMUT_REMAPB_C11_C12),\
+ SR(DWB_GAMUT_REMAPB_C13_C14),\
+ SR(DWB_GAMUT_REMAPB_C21_C22),\
+ SR(DWB_GAMUT_REMAPB_C23_C24),\
+ SR(DWB_GAMUT_REMAPB_C31_C32),\
+ SR(DWB_GAMUT_REMAPB_C33_C34),\
+ SR(DWB_OGAM_CONTROL),\
+ SR(DWB_OGAM_LUT_INDEX),\
+ SR(DWB_OGAM_LUT_DATA),\
+ SR(DWB_OGAM_LUT_CONTROL),\
+ SR(DWB_OGAM_RAMA_START_CNTL_B),\
+ SR(DWB_OGAM_RAMA_START_CNTL_G),\
+ SR(DWB_OGAM_RAMA_START_CNTL_R),\
+ SR(DWB_OGAM_RAMA_START_BASE_CNTL_B),\
+ SR(DWB_OGAM_RAMA_START_SLOPE_CNTL_B),\
+ SR(DWB_OGAM_RAMA_START_BASE_CNTL_G),\
+ SR(DWB_OGAM_RAMA_START_SLOPE_CNTL_G),\
+ SR(DWB_OGAM_RAMA_START_BASE_CNTL_R),\
+ SR(DWB_OGAM_RAMA_START_SLOPE_CNTL_R),\
+ SR(DWB_OGAM_RAMA_END_CNTL1_B),\
+ SR(DWB_OGAM_RAMA_END_CNTL2_B),\
+ SR(DWB_OGAM_RAMA_END_CNTL1_G),\
+ SR(DWB_OGAM_RAMA_END_CNTL2_G),\
+ SR(DWB_OGAM_RAMA_END_CNTL1_R),\
+ SR(DWB_OGAM_RAMA_END_CNTL2_R),\
+ SR(DWB_OGAM_RAMA_OFFSET_B),\
+ SR(DWB_OGAM_RAMA_OFFSET_G),\
+ SR(DWB_OGAM_RAMA_OFFSET_R),\
+ SR(DWB_OGAM_RAMA_REGION_0_1),\
+ SR(DWB_OGAM_RAMA_REGION_2_3),\
+ SR(DWB_OGAM_RAMA_REGION_4_5),\
+ SR(DWB_OGAM_RAMA_REGION_6_7),\
+ SR(DWB_OGAM_RAMA_REGION_8_9),\
+ SR(DWB_OGAM_RAMA_REGION_10_11),\
+ SR(DWB_OGAM_RAMA_REGION_12_13),\
+ SR(DWB_OGAM_RAMA_REGION_14_15),\
+ SR(DWB_OGAM_RAMA_REGION_16_17),\
+ SR(DWB_OGAM_RAMA_REGION_18_19),\
+ SR(DWB_OGAM_RAMA_REGION_20_21),\
+ SR(DWB_OGAM_RAMA_REGION_22_23),\
+ SR(DWB_OGAM_RAMA_REGION_24_25),\
+ SR(DWB_OGAM_RAMA_REGION_26_27),\
+ SR(DWB_OGAM_RAMA_REGION_28_29),\
+ SR(DWB_OGAM_RAMA_REGION_30_31),\
+ SR(DWB_OGAM_RAMA_REGION_32_33),\
+ SR(DWB_OGAM_RAMB_START_CNTL_B),\
+ SR(DWB_OGAM_RAMB_START_CNTL_G),\
+ SR(DWB_OGAM_RAMB_START_CNTL_R),\
+ SR(DWB_OGAM_RAMB_START_BASE_CNTL_B),\
+ SR(DWB_OGAM_RAMB_START_SLOPE_CNTL_B),\
+ SR(DWB_OGAM_RAMB_START_BASE_CNTL_G),\
+ SR(DWB_OGAM_RAMB_START_SLOPE_CNTL_G),\
+ SR(DWB_OGAM_RAMB_START_BASE_CNTL_R),\
+ SR(DWB_OGAM_RAMB_START_SLOPE_CNTL_R),\
+ SR(DWB_OGAM_RAMB_END_CNTL1_B),\
+ SR(DWB_OGAM_RAMB_END_CNTL2_B),\
+ SR(DWB_OGAM_RAMB_END_CNTL1_G),\
+ SR(DWB_OGAM_RAMB_END_CNTL2_G),\
+ SR(DWB_OGAM_RAMB_END_CNTL1_R),\
+ SR(DWB_OGAM_RAMB_END_CNTL2_R),\
+ SR(DWB_OGAM_RAMB_OFFSET_B),\
+ SR(DWB_OGAM_RAMB_OFFSET_G),\
+ SR(DWB_OGAM_RAMB_OFFSET_R),\
+ SR(DWB_OGAM_RAMB_REGION_0_1),\
+ SR(DWB_OGAM_RAMB_REGION_2_3),\
+ SR(DWB_OGAM_RAMB_REGION_4_5),\
+ SR(DWB_OGAM_RAMB_REGION_6_7),\
+ SR(DWB_OGAM_RAMB_REGION_8_9),\
+ SR(DWB_OGAM_RAMB_REGION_10_11),\
+ SR(DWB_OGAM_RAMB_REGION_12_13),\
+ SR(DWB_OGAM_RAMB_REGION_14_15),\
+ SR(DWB_OGAM_RAMB_REGION_16_17),\
+ SR(DWB_OGAM_RAMB_REGION_18_19),\
+ SR(DWB_OGAM_RAMB_REGION_20_21),\
+ SR(DWB_OGAM_RAMB_REGION_22_23),\
+ SR(DWB_OGAM_RAMB_REGION_24_25),\
+ SR(DWB_OGAM_RAMB_REGION_26_27),\
+ SR(DWB_OGAM_RAMB_REGION_28_29),\
+ SR(DWB_OGAM_RAMB_REGION_30_31),\
+ SR(DWB_OGAM_RAMB_REGION_32_33)
+
+
+#define DWBC_COMMON_MASK_SH_LIST_DCN30(mask_sh) \
+ SF_DWB2(DWB_ENABLE_CLK_CTRL, DWB_TOP, 0, DWB_ENABLE, mask_sh),\
+ SF_DWB2(DWB_ENABLE_CLK_CTRL, DWB_TOP, 0, DISPCLK_R_DWB_GATE_DIS, mask_sh),\
+ SF_DWB2(DWB_ENABLE_CLK_CTRL, DWB_TOP, 0, DISPCLK_G_DWB_GATE_DIS, mask_sh),\
+ SF_DWB2(DWB_ENABLE_CLK_CTRL, DWB_TOP, 0, DWB_TEST_CLK_SEL, mask_sh),\
+ SF_DWB2(DWB_MEM_PWR_CTRL, DWB_TOP, 0, DWB_OGAM_LUT_MEM_PWR_FORCE, mask_sh),\
+ SF_DWB2(DWB_MEM_PWR_CTRL, DWB_TOP, 0, DWB_OGAM_LUT_MEM_PWR_DIS, mask_sh),\
+ SF_DWB2(DWB_MEM_PWR_CTRL, DWB_TOP, 0, DWB_OGAM_LUT_MEM_PWR_STATE, mask_sh),\
+ SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_FRAME_CAPTURE_EN, mask_sh),\
+ SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_FRAME_CAPTURE_RATE, mask_sh),\
+ SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_WINDOW_CROP_EN, mask_sh),\
+ SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_EYE_SELECTION, mask_sh),\
+ SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_STEREO_EYE_POLARITY, mask_sh),\
+ SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_NEW_CONTENT, mask_sh),\
+ SF_DWB2(FC_MODE_CTRL, DWB_TOP, 0, FC_FRAME_CAPTURE_EN_CURRENT, mask_sh),\
+ SF_DWB2(FC_FLOW_CTRL, DWB_TOP, 0, FC_FIRST_PIXEL_DELAY_COUNT, mask_sh),\
+ SF_DWB2(FC_WINDOW_START, DWB_TOP, 0, FC_WINDOW_START_X, mask_sh),\
+ SF_DWB2(FC_WINDOW_START, DWB_TOP, 0, FC_WINDOW_START_Y, mask_sh),\
+ SF_DWB2(FC_WINDOW_SIZE, DWB_TOP, 0, FC_WINDOW_WIDTH, mask_sh),\
+ SF_DWB2(FC_WINDOW_SIZE, DWB_TOP, 0, FC_WINDOW_HEIGHT, mask_sh),\
+ SF_DWB2(FC_SOURCE_SIZE, DWB_TOP, 0, FC_SOURCE_WIDTH, mask_sh),\
+ SF_DWB2(FC_SOURCE_SIZE, DWB_TOP, 0, FC_SOURCE_HEIGHT, mask_sh),\
+ SF_DWB2(DWB_UPDATE_CTRL, DWB_TOP, 0, DWB_UPDATE_LOCK, mask_sh),\
+ SF_DWB2(DWB_UPDATE_CTRL, DWB_TOP, 0, DWB_UPDATE_PENDING, mask_sh),\
+ SF_DWB2(DWB_CRC_CTRL, DWB_TOP, 0, DWB_CRC_EN, mask_sh),\
+ SF_DWB2(DWB_CRC_CTRL, DWB_TOP, 0, DWB_CRC_CONT_EN, mask_sh),\
+ SF_DWB2(DWB_CRC_CTRL, DWB_TOP, 0, DWB_CRC_SRC_SEL, mask_sh),\
+ SF_DWB2(DWB_CRC_MASK_R_G, DWB_TOP, 0, DWB_CRC_RED_MASK, mask_sh),\
+ SF_DWB2(DWB_CRC_MASK_R_G, DWB_TOP, 0, DWB_CRC_GREEN_MASK, mask_sh),\
+ SF_DWB2(DWB_CRC_MASK_B_A, DWB_TOP, 0, DWB_CRC_BLUE_MASK, mask_sh),\
+ SF_DWB2(DWB_CRC_MASK_B_A, DWB_TOP, 0, DWB_CRC_A_MASK, mask_sh),\
+ SF_DWB2(DWB_CRC_VAL_R_G, DWB_TOP, 0, DWB_CRC_SIG_RED, mask_sh),\
+ SF_DWB2(DWB_CRC_VAL_R_G, DWB_TOP, 0, DWB_CRC_SIG_GREEN, mask_sh),\
+ SF_DWB2(DWB_CRC_VAL_B_A, DWB_TOP, 0, DWB_CRC_SIG_BLUE, mask_sh),\
+ SF_DWB2(DWB_CRC_VAL_B_A, DWB_TOP, 0, DWB_CRC_SIG_A, mask_sh),\
+ SF_DWB2(DWB_OUT_CTRL, DWB_TOP, 0, OUT_FORMAT, mask_sh),\
+ SF_DWB2(DWB_OUT_CTRL, DWB_TOP, 0, OUT_DENORM, mask_sh),\
+ SF_DWB2(DWB_OUT_CTRL, DWB_TOP, 0, OUT_MAX, mask_sh),\
+ SF_DWB2(DWB_OUT_CTRL, DWB_TOP, 0, OUT_MIN, mask_sh),\
+ SF_DWB2(DWB_MMHUBBUB_BACKPRESSURE_CNT_EN, DWB_TOP, 0, DWB_MMHUBBUB_BACKPRESSURE_CNT_EN, mask_sh),\
+ SF_DWB2(DWB_MMHUBBUB_BACKPRESSURE_CNT, DWB_TOP, 0, DWB_MMHUBBUB_MAX_BACKPRESSURE, mask_sh),\
+ SF_DWB2(DWB_HOST_READ_CONTROL, DWB_TOP, 0, DWB_HOST_READ_RATE_CONTROL, mask_sh),\
+ SF_DWB2(DWB_SOFT_RESET, DWB_TOP, 0, DWB_SOFT_RESET, mask_sh),\
+ SF_DWB2(DWB_HDR_MULT_COEF, DWBCP, 0, DWB_HDR_MULT_COEF, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAP_MODE, DWBCP, 0, DWB_GAMUT_REMAP_MODE, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAP_MODE, DWBCP, 0, DWB_GAMUT_REMAP_MODE_CURRENT, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAP_COEF_FORMAT, DWBCP, 0, DWB_GAMUT_REMAP_COEF_FORMAT, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPA_C11_C12, DWBCP, 0, DWB_GAMUT_REMAPA_C11, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPA_C11_C12, DWBCP, 0, DWB_GAMUT_REMAPA_C12, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPA_C13_C14, DWBCP, 0, DWB_GAMUT_REMAPA_C13, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPA_C13_C14, DWBCP, 0, DWB_GAMUT_REMAPA_C14, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPA_C21_C22, DWBCP, 0, DWB_GAMUT_REMAPA_C21, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPA_C21_C22, DWBCP, 0, DWB_GAMUT_REMAPA_C22, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPA_C23_C24, DWBCP, 0, DWB_GAMUT_REMAPA_C23, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPA_C23_C24, DWBCP, 0, DWB_GAMUT_REMAPA_C24, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPA_C31_C32, DWBCP, 0, DWB_GAMUT_REMAPA_C31, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPA_C31_C32, DWBCP, 0, DWB_GAMUT_REMAPA_C32, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPA_C33_C34, DWBCP, 0, DWB_GAMUT_REMAPA_C33, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPA_C33_C34, DWBCP, 0, DWB_GAMUT_REMAPA_C34, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPB_C11_C12, DWBCP, 0, DWB_GAMUT_REMAPB_C11, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPB_C11_C12, DWBCP, 0, DWB_GAMUT_REMAPB_C12, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPB_C13_C14, DWBCP, 0, DWB_GAMUT_REMAPB_C13, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPB_C13_C14, DWBCP, 0, DWB_GAMUT_REMAPB_C14, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPB_C21_C22, DWBCP, 0, DWB_GAMUT_REMAPB_C21, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPB_C21_C22, DWBCP, 0, DWB_GAMUT_REMAPB_C22, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPB_C23_C24, DWBCP, 0, DWB_GAMUT_REMAPB_C23, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPB_C23_C24, DWBCP, 0, DWB_GAMUT_REMAPB_C24, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPB_C31_C32, DWBCP, 0, DWB_GAMUT_REMAPB_C31, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPB_C31_C32, DWBCP, 0, DWB_GAMUT_REMAPB_C32, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPB_C33_C34, DWBCP, 0, DWB_GAMUT_REMAPB_C33, mask_sh),\
+ SF_DWB2(DWB_GAMUT_REMAPB_C33_C34, DWBCP, 0, DWB_GAMUT_REMAPB_C34, mask_sh),\
+ SF_DWB2(DWB_OGAM_CONTROL, DWBCP, 0, DWB_OGAM_MODE, mask_sh),\
+ SF_DWB2(DWB_OGAM_CONTROL, DWBCP, 0, DWB_OGAM_SELECT, mask_sh),\
+ SF_DWB2(DWB_OGAM_CONTROL, DWBCP, 0, DWB_OGAM_PWL_DISABLE, mask_sh),\
+ SF_DWB2(DWB_OGAM_CONTROL, DWBCP, 0, DWB_OGAM_MODE_CURRENT, mask_sh),\
+ SF_DWB2(DWB_OGAM_CONTROL, DWBCP, 0, DWB_OGAM_SELECT_CURRENT, mask_sh),\
+ SF_DWB2(DWB_OGAM_LUT_INDEX, DWBCP, 0, DWB_OGAM_LUT_INDEX, mask_sh),\
+ SF_DWB2(DWB_OGAM_LUT_DATA, DWBCP, 0, DWB_OGAM_LUT_DATA, mask_sh),\
+ SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_WRITE_COLOR_MASK, mask_sh),\
+ SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_READ_COLOR_SEL, mask_sh),\
+ SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_READ_DBG, mask_sh),\
+ SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_HOST_SEL, mask_sh),\
+ SF_DWB2(DWB_OGAM_LUT_CONTROL, DWBCP, 0, DWB_OGAM_LUT_CONFIG_MODE, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_START_CNTL_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_START_CNTL_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_START_CNTL_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_START_CNTL_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_START_CNTL_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_START_CNTL_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_START_BASE_CNTL_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_BASE_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_START_SLOPE_CNTL_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_START_BASE_CNTL_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_BASE_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_START_SLOPE_CNTL_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_START_BASE_CNTL_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_BASE_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_START_SLOPE_CNTL_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_END_CNTL1_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_END_CNTL2_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_END_CNTL2_B, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_END_CNTL1_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_END_CNTL2_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_END_CNTL2_G, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_END_CNTL1_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_END_CNTL2_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_END_CNTL2_R, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_OFFSET_B, DWBCP, 0, DWB_OGAM_RAMA_OFFSET_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_OFFSET_G, DWBCP, 0, DWB_OGAM_RAMA_OFFSET_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_OFFSET_R, DWBCP, 0, DWB_OGAM_RAMA_OFFSET_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION2_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION3_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION4_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION5_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION6_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION7_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION8_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION9_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION10_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION11_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION12_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION13_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION14_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION15_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION16_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION17_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION18_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION19_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION20_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION21_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION22_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION23_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION24_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION25_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION26_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION27_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION28_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION29_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION30_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION31_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION32_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMA_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_START_CNTL_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_START_CNTL_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_START_CNTL_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_START_CNTL_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_START_CNTL_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_START_CNTL_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_START_BASE_CNTL_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_BASE_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_START_SLOPE_CNTL_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_START_BASE_CNTL_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_BASE_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_START_SLOPE_CNTL_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_START_BASE_CNTL_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_BASE_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_START_SLOPE_CNTL_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_END_CNTL1_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_END_CNTL2_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_END_CNTL2_B, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_END_CNTL1_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_END_CNTL2_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_END_CNTL2_G, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_END_CNTL1_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_END_CNTL2_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_END_CNTL2_R, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_OFFSET_B, DWBCP, 0, DWB_OGAM_RAMB_OFFSET_B, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_OFFSET_G, DWBCP, 0, DWB_OGAM_RAMB_OFFSET_G, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_OFFSET_R, DWBCP, 0, DWB_OGAM_RAMB_OFFSET_R, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_0_1, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION2_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION3_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_2_3, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION4_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION5_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_4_5, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION6_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION7_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_6_7, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION8_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION9_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_8_9, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION10_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION11_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_10_11, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION12_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION13_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_12_13, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION14_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION15_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_14_15, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION16_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION17_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_16_17, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION18_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION19_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_18_19, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION20_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION21_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_20_21, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION22_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION23_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_22_23, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION24_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION25_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_24_25, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION26_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION27_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_26_27, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION28_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION29_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_28_29, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION30_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION31_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_30_31, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION32_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION33_LUT_OFFSET, mask_sh),\
+ SF_DWB2(DWB_OGAM_RAMB_REGION_32_33, DWBCP, 0, DWB_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS, mask_sh)
+
+
+#define DWBC_REG_FIELD_LIST_DCN3_0(type) \
+ type DWB_ENABLE;\
+ type DISPCLK_R_DWB_GATE_DIS;\
+ type DISPCLK_G_DWB_GATE_DIS;\
+ type DWB_TEST_CLK_SEL;\
+ type DWBSCL_LUT_MEM_PWR_FORCE;\
+ type DWBSCL_LUT_MEM_PWR_DIS;\
+ type DWBSCL_LUT_MEM_PWR_STATE;\
+ type DWBSCL_LB_MEM_PWR_FORCE;\
+ type DWBSCL_LB_MEM_PWR_DIS;\
+ type DWBSCL_LB_MEM_PWR_STATE;\
+ type DWB_OGAM_LUT_MEM_PWR_FORCE;\
+ type DWB_OGAM_LUT_MEM_PWR_DIS;\
+ type DWB_OGAM_LUT_MEM_PWR_STATE;\
+ type FC_FRAME_CAPTURE_EN;\
+ type FC_FRAME_CAPTURE_RATE;\
+ type FC_WINDOW_CROP_EN;\
+ type FC_EYE_SELECTION;\
+ type FC_STEREO_EYE_POLARITY;\
+ type FC_NEW_CONTENT;\
+ type FC_FI_EN;\
+ type FC_FI_PHASE;\
+ type FC_FRAME_CAPTURE_EN_CURRENT;\
+ type FC_FIRST_PIXEL_DELAY_COUNT;\
+ type FC_WINDOW_START_X;\
+ type FC_WINDOW_START_Y;\
+ type FC_WINDOW_WIDTH;\
+ type FC_WINDOW_HEIGHT;\
+ type FC_SOURCE_WIDTH;\
+ type FC_SOURCE_HEIGHT;\
+ type DWB_UPDATE_LOCK;\
+ type DWB_UPDATE_PENDING;\
+ type DWB_CRC_EN;\
+ type DWB_CRC_CONT_EN;\
+ type DWB_CRC_SRC_SEL;\
+ type DWB_CRC_RED_MASK;\
+ type DWB_CRC_GREEN_MASK;\
+ type DWB_CRC_BLUE_MASK;\
+ type DWB_CRC_A_MASK;\
+ type DWB_CRC_SIG_RED;\
+ type DWB_CRC_SIG_GREEN;\
+ type DWB_CRC_SIG_BLUE;\
+ type DWB_CRC_SIG_A;\
+ type OUT_FORMAT;\
+ type OUT_DENORM;\
+ type OUT_MAX;\
+ type OUT_MIN;\
+ type DWB_MMHUBBUB_BACKPRESSURE_CNT_EN;\
+ type DWB_MMHUBBUB_MAX_BACKPRESSURE;\
+ type DWB_HOST_READ_RATE_CONTROL;\
+ type DWBSCL_DATA_OVERFLOW_FLAG;\
+ type DWBSCL_DATA_OVERFLOW_ACK;\
+ type DWBSCL_DATA_OVERFLOW_MASK;\
+ type DWBSCL_DATA_OVERFLOW_INT_STATUS;\
+ type DWBSCL_DATA_OVERFLOW_INT_TYPE;\
+ type DWBSCL_DATA_OVERFLOW_TYPE;\
+ type DWBSCL_DATA_OVERFLOW_OUT_X_CNT;\
+ type DWBSCL_DATA_OVERFLOW_OUT_Y_CNT;\
+ type DWB_SOFT_RESET;\
+ type DWBSCL_COEF_RAM_TAP_PAIR_IDX;\
+ type DWBSCL_COEF_RAM_PHASE;\
+ type DWBSCL_COEF_RAM_FILTER_TYPE;\
+ type DWBSCL_COEF_RAM_SELECT_RD;\
+ type DWBSCL_COEF_RAM_EVEN_TAP_COEF;\
+ type DWBSCL_COEF_RAM_EVEN_TAP_COEF_EN;\
+ type DWBSCL_COEF_RAM_ODD_TAP_COEF;\
+ type DWBSCL_COEF_RAM_ODD_TAP_COEF_EN;\
+ type DWBSCL_MODE;\
+ type DWBSCL_COEF_RAM_SELECT;\
+ type DWBSCL_COEF_RAM_SELECT_CURRENT;\
+ type DWBSCL_H_NUM_OF_TAPS;\
+ type DWBSCL_V_NUM_OF_TAPS;\
+ type DWBSCL_H_SCALE_RATIO;\
+ type DWBSCL_H_INIT_FRAC;\
+ type DWBSCL_H_INIT_INT;\
+ type DWBSCL_V_SCALE_RATIO;\
+ type DWBSCL_V_INIT_FRAC;\
+ type DWBSCL_V_INIT_INT;\
+ type DWBSCL_BOUNDARY_MODE;\
+ type DWBSCL_BLACK_COLOR_RGB;\
+ type DWBSCL_DEST_WIDTH;\
+ type DWBSCL_DEST_HEIGHT;\
+ type DWB_HDR_MULT_COEF;\
+ type DWB_GAMUT_REMAP_MODE;\
+ type DWB_GAMUT_REMAP_MODE_CURRENT;\
+ type DWB_GAMUT_REMAP_COEF_FORMAT;\
+ type DWB_GAMUT_REMAPA_C11;\
+ type DWB_GAMUT_REMAPA_C12;\
+ type DWB_GAMUT_REMAPA_C13;\
+ type DWB_GAMUT_REMAPA_C14;\
+ type DWB_GAMUT_REMAPA_C21;\
+ type DWB_GAMUT_REMAPA_C22;\
+ type DWB_GAMUT_REMAPA_C23;\
+ type DWB_GAMUT_REMAPA_C24;\
+ type DWB_GAMUT_REMAPA_C31;\
+ type DWB_GAMUT_REMAPA_C32;\
+ type DWB_GAMUT_REMAPA_C33;\
+ type DWB_GAMUT_REMAPA_C34;\
+ type DWB_GAMUT_REMAPB_C11;\
+ type DWB_GAMUT_REMAPB_C12;\
+ type DWB_GAMUT_REMAPB_C13;\
+ type DWB_GAMUT_REMAPB_C14;\
+ type DWB_GAMUT_REMAPB_C21;\
+ type DWB_GAMUT_REMAPB_C22;\
+ type DWB_GAMUT_REMAPB_C23;\
+ type DWB_GAMUT_REMAPB_C24;\
+ type DWB_GAMUT_REMAPB_C31;\
+ type DWB_GAMUT_REMAPB_C32;\
+ type DWB_GAMUT_REMAPB_C33;\
+ type DWB_GAMUT_REMAPB_C34;\
+ type DWB_OGAM_MODE;\
+ type DWB_OGAM_SELECT;\
+ type DWB_OGAM_PWL_DISABLE;\
+ type DWB_OGAM_MODE_CURRENT;\
+ type DWB_OGAM_SELECT_CURRENT;\
+ type DWB_OGAM_LUT_INDEX;\
+ type DWB_OGAM_LUT_DATA;\
+ type DWB_OGAM_LUT_WRITE_COLOR_MASK;\
+ type DWB_OGAM_LUT_READ_COLOR_SEL;\
+ type DWB_OGAM_LUT_READ_DBG;\
+ type DWB_OGAM_LUT_HOST_SEL;\
+ type DWB_OGAM_LUT_CONFIG_MODE;\
+ type DWB_OGAM_LUT_STATUS;\
+ type DWB_OGAM_RAMA_EXP_REGION_START_B;\
+ type DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;\
+ type DWB_OGAM_RAMA_EXP_REGION_START_G;\
+ type DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_G;\
+ type DWB_OGAM_RAMA_EXP_REGION_START_R;\
+ type DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_R;\
+ type DWB_OGAM_RAMA_EXP_REGION_START_BASE_B;\
+ type DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B;\
+ type DWB_OGAM_RAMA_EXP_REGION_START_BASE_G;\
+ type DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_G;\
+ type DWB_OGAM_RAMA_EXP_REGION_START_BASE_R;\
+ type DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_R;\
+ type DWB_OGAM_RAMA_EXP_REGION_END_BASE_B;\
+ type DWB_OGAM_RAMA_EXP_REGION_END_B;\
+ type DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_B;\
+ type DWB_OGAM_RAMA_EXP_REGION_END_BASE_G;\
+ type DWB_OGAM_RAMA_EXP_REGION_END_G;\
+ type DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_G;\
+ type DWB_OGAM_RAMA_EXP_REGION_END_BASE_R;\
+ type DWB_OGAM_RAMA_EXP_REGION_END_R;\
+ type DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_R;\
+ type DWB_OGAM_RAMA_OFFSET_B;\
+ type DWB_OGAM_RAMA_OFFSET_G;\
+ type DWB_OGAM_RAMA_OFFSET_R;\
+ type DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION1_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION2_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION3_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION4_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION5_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION6_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION7_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION8_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION9_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION10_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION11_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION12_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION13_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION14_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION15_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION16_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION17_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION18_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION19_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION20_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION21_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION22_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION23_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION24_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION25_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION26_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION27_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION28_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION29_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION30_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION31_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION32_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMA_EXP_REGION33_LUT_OFFSET;\
+ type DWB_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION_START_B;\
+ type DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_B;\
+ type DWB_OGAM_RAMB_EXP_REGION_START_G;\
+ type DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_G;\
+ type DWB_OGAM_RAMB_EXP_REGION_START_R;\
+ type DWB_OGAM_RAMB_EXP_REGION_START_SEGMENT_R;\
+ type DWB_OGAM_RAMB_EXP_REGION_START_BASE_B;\
+ type DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_B;\
+ type DWB_OGAM_RAMB_EXP_REGION_START_BASE_G;\
+ type DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_G;\
+ type DWB_OGAM_RAMB_EXP_REGION_START_BASE_R;\
+ type DWB_OGAM_RAMB_EXP_REGION_START_SLOPE_R;\
+ type DWB_OGAM_RAMB_EXP_REGION_END_BASE_B;\
+ type DWB_OGAM_RAMB_EXP_REGION_END_B;\
+ type DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_B;\
+ type DWB_OGAM_RAMB_EXP_REGION_END_BASE_G;\
+ type DWB_OGAM_RAMB_EXP_REGION_END_G;\
+ type DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_G;\
+ type DWB_OGAM_RAMB_EXP_REGION_END_BASE_R;\
+ type DWB_OGAM_RAMB_EXP_REGION_END_R;\
+ type DWB_OGAM_RAMB_EXP_REGION_END_SLOPE_R;\
+ type DWB_OGAM_RAMB_OFFSET_B;\
+ type DWB_OGAM_RAMB_OFFSET_G;\
+ type DWB_OGAM_RAMB_OFFSET_R;\
+ type DWB_OGAM_RAMB_EXP_REGION0_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION1_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION2_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION3_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION4_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION5_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION6_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION7_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION8_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION9_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION10_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION11_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION12_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION13_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION14_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION15_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION16_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION17_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION18_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION19_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION20_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION21_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION22_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION23_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION24_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION25_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION26_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION27_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION28_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION29_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION30_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION31_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION32_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS;\
+ type DWB_OGAM_RAMB_EXP_REGION33_LUT_OFFSET;\
+ type DWB_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS;
+
+struct dcn30_dwbc_registers {
+ /* DCN3AG */
+ /* DWB_TOP */
+ uint32_t DWB_ENABLE_CLK_CTRL;
+ uint32_t DWB_MEM_PWR_CTRL;
+ uint32_t FC_MODE_CTRL;
+ uint32_t FC_FLOW_CTRL;
+ uint32_t FC_WINDOW_START;
+ uint32_t FC_WINDOW_SIZE;
+ uint32_t FC_SOURCE_SIZE;
+ uint32_t DWB_UPDATE_CTRL;
+ uint32_t DWB_CRC_CTRL;
+ uint32_t DWB_CRC_MASK_R_G;
+ uint32_t DWB_CRC_MASK_B_A;
+ uint32_t DWB_CRC_VAL_R_G;
+ uint32_t DWB_CRC_VAL_B_A;
+ uint32_t DWB_OUT_CTRL;
+ uint32_t DWB_MMHUBBUB_BACKPRESSURE_CNT_EN;
+ uint32_t DWB_MMHUBBUB_BACKPRESSURE_CNT;
+ uint32_t DWB_HOST_READ_CONTROL;
+ uint32_t DWB_SOFT_RESET;
+
+ /* DWBSCL */
+ uint32_t DWBSCL_COEF_RAM_TAP_SELECT;
+ uint32_t DWBSCL_COEF_RAM_TAP_DATA;
+ uint32_t DWBSCL_MODE;
+ uint32_t DWBSCL_TAP_CONTROL;
+ uint32_t DWBSCL_HORZ_FILTER_SCALE_RATIO;
+ uint32_t DWBSCL_HORZ_FILTER_INIT;
+ uint32_t DWBSCL_VERT_FILTER_SCALE_RATIO;
+ uint32_t DWBSCL_VERT_FILTER_INIT;
+ uint32_t DWBSCL_BOUNDARY_CTRL;
+ uint32_t DWBSCL_DEST_SIZE;
+ uint32_t DWBSCL_OVERFLOW_STATUS;
+ uint32_t DWBSCL_OVERFLOW_COUNTER;
+
+ /* DWBCP */
+ uint32_t DWB_HDR_MULT_COEF;
+ uint32_t DWB_GAMUT_REMAP_MODE;
+ uint32_t DWB_GAMUT_REMAP_COEF_FORMAT;
+ uint32_t DWB_GAMUT_REMAPA_C11_C12;
+ uint32_t DWB_GAMUT_REMAPA_C13_C14;
+ uint32_t DWB_GAMUT_REMAPA_C21_C22;
+ uint32_t DWB_GAMUT_REMAPA_C23_C24;
+ uint32_t DWB_GAMUT_REMAPA_C31_C32;
+ uint32_t DWB_GAMUT_REMAPA_C33_C34;
+ uint32_t DWB_GAMUT_REMAPB_C11_C12;
+ uint32_t DWB_GAMUT_REMAPB_C13_C14;
+ uint32_t DWB_GAMUT_REMAPB_C21_C22;
+ uint32_t DWB_GAMUT_REMAPB_C23_C24;
+ uint32_t DWB_GAMUT_REMAPB_C31_C32;
+ uint32_t DWB_GAMUT_REMAPB_C33_C34;
+ uint32_t DWB_OGAM_CONTROL;
+ uint32_t DWB_OGAM_LUT_INDEX;
+ uint32_t DWB_OGAM_LUT_DATA;
+ uint32_t DWB_OGAM_LUT_CONTROL;
+ uint32_t DWB_OGAM_RAMA_START_CNTL_B;
+ uint32_t DWB_OGAM_RAMA_START_CNTL_G;
+ uint32_t DWB_OGAM_RAMA_START_CNTL_R;
+ uint32_t DWB_OGAM_RAMA_START_BASE_CNTL_B;
+ uint32_t DWB_OGAM_RAMA_START_SLOPE_CNTL_B;
+ uint32_t DWB_OGAM_RAMA_START_BASE_CNTL_G;
+ uint32_t DWB_OGAM_RAMA_START_SLOPE_CNTL_G;
+ uint32_t DWB_OGAM_RAMA_START_BASE_CNTL_R;
+ uint32_t DWB_OGAM_RAMA_START_SLOPE_CNTL_R;
+ uint32_t DWB_OGAM_RAMA_END_CNTL1_B;
+ uint32_t DWB_OGAM_RAMA_END_CNTL2_B;
+ uint32_t DWB_OGAM_RAMA_END_CNTL1_G;
+ uint32_t DWB_OGAM_RAMA_END_CNTL2_G;
+ uint32_t DWB_OGAM_RAMA_END_CNTL1_R;
+ uint32_t DWB_OGAM_RAMA_END_CNTL2_R;
+ uint32_t DWB_OGAM_RAMA_OFFSET_B;
+ uint32_t DWB_OGAM_RAMA_OFFSET_G;
+ uint32_t DWB_OGAM_RAMA_OFFSET_R;
+ uint32_t DWB_OGAM_RAMA_REGION_0_1;
+ uint32_t DWB_OGAM_RAMA_REGION_2_3;
+ uint32_t DWB_OGAM_RAMA_REGION_4_5;
+ uint32_t DWB_OGAM_RAMA_REGION_6_7;
+ uint32_t DWB_OGAM_RAMA_REGION_8_9;
+ uint32_t DWB_OGAM_RAMA_REGION_10_11;
+ uint32_t DWB_OGAM_RAMA_REGION_12_13;
+ uint32_t DWB_OGAM_RAMA_REGION_14_15;
+ uint32_t DWB_OGAM_RAMA_REGION_16_17;
+ uint32_t DWB_OGAM_RAMA_REGION_18_19;
+ uint32_t DWB_OGAM_RAMA_REGION_20_21;
+ uint32_t DWB_OGAM_RAMA_REGION_22_23;
+ uint32_t DWB_OGAM_RAMA_REGION_24_25;
+ uint32_t DWB_OGAM_RAMA_REGION_26_27;
+ uint32_t DWB_OGAM_RAMA_REGION_28_29;
+ uint32_t DWB_OGAM_RAMA_REGION_30_31;
+ uint32_t DWB_OGAM_RAMA_REGION_32_33;
+ uint32_t DWB_OGAM_RAMB_START_CNTL_B;
+ uint32_t DWB_OGAM_RAMB_START_CNTL_G;
+ uint32_t DWB_OGAM_RAMB_START_CNTL_R;
+ uint32_t DWB_OGAM_RAMB_START_BASE_CNTL_B;
+ uint32_t DWB_OGAM_RAMB_START_SLOPE_CNTL_B;
+ uint32_t DWB_OGAM_RAMB_START_BASE_CNTL_G;
+ uint32_t DWB_OGAM_RAMB_START_SLOPE_CNTL_G;
+ uint32_t DWB_OGAM_RAMB_START_BASE_CNTL_R;
+ uint32_t DWB_OGAM_RAMB_START_SLOPE_CNTL_R;
+ uint32_t DWB_OGAM_RAMB_END_CNTL1_B;
+ uint32_t DWB_OGAM_RAMB_END_CNTL2_B;
+ uint32_t DWB_OGAM_RAMB_END_CNTL1_G;
+ uint32_t DWB_OGAM_RAMB_END_CNTL2_G;
+ uint32_t DWB_OGAM_RAMB_END_CNTL1_R;
+ uint32_t DWB_OGAM_RAMB_END_CNTL2_R;
+ uint32_t DWB_OGAM_RAMB_OFFSET_B;
+ uint32_t DWB_OGAM_RAMB_OFFSET_G;
+ uint32_t DWB_OGAM_RAMB_OFFSET_R;
+ uint32_t DWB_OGAM_RAMB_REGION_0_1;
+ uint32_t DWB_OGAM_RAMB_REGION_2_3;
+ uint32_t DWB_OGAM_RAMB_REGION_4_5;
+ uint32_t DWB_OGAM_RAMB_REGION_6_7;
+ uint32_t DWB_OGAM_RAMB_REGION_8_9;
+ uint32_t DWB_OGAM_RAMB_REGION_10_11;
+ uint32_t DWB_OGAM_RAMB_REGION_12_13;
+ uint32_t DWB_OGAM_RAMB_REGION_14_15;
+ uint32_t DWB_OGAM_RAMB_REGION_16_17;
+ uint32_t DWB_OGAM_RAMB_REGION_18_19;
+ uint32_t DWB_OGAM_RAMB_REGION_20_21;
+ uint32_t DWB_OGAM_RAMB_REGION_22_23;
+ uint32_t DWB_OGAM_RAMB_REGION_24_25;
+ uint32_t DWB_OGAM_RAMB_REGION_26_27;
+ uint32_t DWB_OGAM_RAMB_REGION_28_29;
+ uint32_t DWB_OGAM_RAMB_REGION_30_31;
+ uint32_t DWB_OGAM_RAMB_REGION_32_33;
+};
+
+/* Internal enums / structs */
+enum dwbscl_coef_filter_type_sel {
+ DWBSCL_COEF_RAM_FILTER_TYPE_VERT_RGB = 0,
+ DWBSCL_COEF_RAM_FILTER_TYPE_HORZ_RGB = 1
+};
+
+
+struct dcn30_dwbc_mask {
+ DWBC_REG_FIELD_LIST_DCN3_0(uint32_t);
+};
+
+struct dcn30_dwbc_shift {
+ DWBC_REG_FIELD_LIST_DCN3_0(uint8_t);
+};
+
+struct dcn30_dwbc {
+ struct dwbc base;
+ const struct dcn30_dwbc_registers *dwbc_regs;
+ const struct dcn30_dwbc_shift *dwbc_shift;
+ const struct dcn30_dwbc_mask *dwbc_mask;
+};
+
+void dcn30_dwbc_construct(struct dcn30_dwbc *dwbc30,
+ struct dc_context *ctx,
+ const struct dcn30_dwbc_registers *dwbc_regs,
+ const struct dcn30_dwbc_shift *dwbc_shift,
+ const struct dcn30_dwbc_mask *dwbc_mask,
+ int inst);
+
+bool dwb3_enable(struct dwbc *dwbc, struct dc_dwb_params *params);
+
+bool dwb3_disable(struct dwbc *dwbc);
+
+bool dwb3_update(struct dwbc *dwbc, struct dc_dwb_params *params);
+
+bool dwb3_is_enabled(struct dwbc *dwbc);
+
+void dwb3_set_stereo(struct dwbc *dwbc,
+ struct dwb_stereo_params *stereo_params);
+
+void dwb3_set_new_content(struct dwbc *dwbc,
+ bool is_new_content);
+
+void dwb3_config_fc(struct dwbc *dwbc,
+ struct dc_dwb_params *params);
+
+void dwb3_set_denorm(struct dwbc *dwbc, struct dc_dwb_params *params);
+
+void dwb3_program_hdr_mult(
+ struct dwbc *dwbc,
+ const struct dc_dwb_params *params);
+
+void dwb3_set_gamut_remap(
+ struct dwbc *dwbc,
+ const struct dc_dwb_params *params);
+
+bool dwb3_ogam_set_input_transfer_func(
+ struct dwbc *dwbc,
+ const struct dc_transfer_func *in_transfer_func_dwb_ogam);
+
+void dwb3_set_host_read_rate_control(struct dwbc *dwbc, bool host_read_delay);
+#endif
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
new file mode 100644
index 000000000000..8593145379d9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "fixed31_32.h"
+#include "resource.h"
+#include "basics/conversion.h"
+#include "dwb.h"
+#include "dcn30_dwb.h"
+#include "dcn30_cm_common.h"
+#include "dcn10/dcn10_cm_common.h"
+
+
+#define REG(reg)\
+ dwbc30->dwbc_regs->reg
+
+#define CTX \
+ dwbc30->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dwbc30->dwbc_shift->field_name, dwbc30->dwbc_mask->field_name
+
+#define TO_DCN30_DWBC(dwbc_base) \
+ container_of(dwbc_base, struct dcn30_dwbc, base)
+
+static void dwb3_get_reg_field_ogam(struct dcn30_dwbc *dwbc30,
+ struct dcn3_xfer_func_reg *reg)
+{
+ reg->shifts.exp_region0_lut_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->masks.exp_region0_lut_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->shifts.exp_region0_num_segments = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->masks.exp_region0_num_segments = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->shifts.exp_region1_lut_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->masks.exp_region1_lut_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->shifts.exp_region1_num_segments = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
+ reg->masks.exp_region1_num_segments = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
+
+ reg->shifts.field_region_end = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_END_B;
+ reg->masks.field_region_end = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_END_B;
+ reg->shifts.field_region_end_slope = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_B;
+ reg->masks.field_region_end_slope = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_END_SLOPE_B;
+ reg->shifts.field_region_end_base = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_END_BASE_B;
+ reg->masks.field_region_end_base = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_END_BASE_B;
+ reg->shifts.field_region_linear_slope = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B;
+ reg->masks.field_region_linear_slope = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_SLOPE_B;
+ reg->masks.field_offset = dwbc30->dwbc_mask->DWB_OGAM_RAMA_OFFSET_B;
+ reg->shifts.field_offset = dwbc30->dwbc_shift->DWB_OGAM_RAMA_OFFSET_B;
+ reg->shifts.exp_region_start = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_B;
+ reg->masks.exp_region_start = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_B;
+ reg->shifts.exp_resion_start_segment = dwbc30->dwbc_shift->DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;
+ reg->masks.exp_resion_start_segment = dwbc30->dwbc_mask->DWB_OGAM_RAMA_EXP_REGION_START_SEGMENT_B;
+}
+
+/* Program DWB OGAM RAM A */
+static void dwb3_program_ogam_luta_settings(
+ struct dcn30_dwbc *dwbc30,
+ const struct pwl_params *params)
+{
+ struct dcn3_xfer_func_reg gam_regs;
+
+ dwb3_get_reg_field_ogam(dwbc30, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(DWB_OGAM_RAMA_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(DWB_OGAM_RAMA_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(DWB_OGAM_RAMA_START_CNTL_R);
+ gam_regs.start_base_cntl_b = REG(DWB_OGAM_RAMA_START_BASE_CNTL_B);
+ gam_regs.start_base_cntl_g = REG(DWB_OGAM_RAMA_START_BASE_CNTL_G);
+ gam_regs.start_base_cntl_r = REG(DWB_OGAM_RAMA_START_BASE_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(DWB_OGAM_RAMA_START_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(DWB_OGAM_RAMA_START_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(DWB_OGAM_RAMA_START_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(DWB_OGAM_RAMA_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(DWB_OGAM_RAMA_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(DWB_OGAM_RAMA_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(DWB_OGAM_RAMA_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(DWB_OGAM_RAMA_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(DWB_OGAM_RAMA_END_CNTL2_R);
+ gam_regs.offset_b = REG(DWB_OGAM_RAMA_OFFSET_B);
+ gam_regs.offset_g = REG(DWB_OGAM_RAMA_OFFSET_G);
+ gam_regs.offset_r = REG(DWB_OGAM_RAMA_OFFSET_R);
+ gam_regs.region_start = REG(DWB_OGAM_RAMA_REGION_0_1);
+ gam_regs.region_end = REG(DWB_OGAM_RAMA_REGION_32_33);
+ /*todo*/
+ cm_helper_program_gamcor_xfer_func(dwbc30->base.ctx, params, &gam_regs);
+}
+
+/* Program DWB OGAM RAM B */
+static void dwb3_program_ogam_lutb_settings(
+ struct dcn30_dwbc *dwbc30,
+ const struct pwl_params *params)
+{
+ struct dcn3_xfer_func_reg gam_regs;
+
+ dwb3_get_reg_field_ogam(dwbc30, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(DWB_OGAM_RAMB_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(DWB_OGAM_RAMB_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(DWB_OGAM_RAMB_START_CNTL_R);
+ gam_regs.start_base_cntl_b = REG(DWB_OGAM_RAMB_START_BASE_CNTL_B);
+ gam_regs.start_base_cntl_g = REG(DWB_OGAM_RAMB_START_BASE_CNTL_G);
+ gam_regs.start_base_cntl_r = REG(DWB_OGAM_RAMB_START_BASE_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(DWB_OGAM_RAMB_START_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(DWB_OGAM_RAMB_START_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(DWB_OGAM_RAMB_START_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(DWB_OGAM_RAMB_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(DWB_OGAM_RAMB_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(DWB_OGAM_RAMB_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(DWB_OGAM_RAMB_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(DWB_OGAM_RAMB_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(DWB_OGAM_RAMB_END_CNTL2_R);
+ gam_regs.offset_b = REG(DWB_OGAM_RAMB_OFFSET_B);
+ gam_regs.offset_g = REG(DWB_OGAM_RAMB_OFFSET_G);
+ gam_regs.offset_r = REG(DWB_OGAM_RAMB_OFFSET_R);
+ gam_regs.region_start = REG(DWB_OGAM_RAMB_REGION_0_1);
+ gam_regs.region_end = REG(DWB_OGAM_RAMB_REGION_32_33);
+
+ cm_helper_program_gamcor_xfer_func(dwbc30->base.ctx, params, &gam_regs);
+}
+
+static enum dc_lut_mode dwb3_get_ogam_current(
+ struct dcn30_dwbc *dwbc30)
+{
+ enum dc_lut_mode mode;
+ uint32_t state_mode;
+ uint32_t ram_select;
+
+ REG_GET(DWB_OGAM_CONTROL,
+ DWB_OGAM_MODE, &state_mode);
+ REG_GET(DWB_OGAM_CONTROL,
+ DWB_OGAM_SELECT, &ram_select);
+
+ if (state_mode == 0) {
+ mode = LUT_BYPASS;
+ } else if (state_mode == 2) {
+ if (ram_select == 0)
+ mode = LUT_RAM_A;
+ else
+ mode = LUT_RAM_B;
+ } else {
+ // Reserved value
+ mode = LUT_BYPASS;
+ BREAK_TO_DEBUGGER();
+ return mode;
+ }
+ return mode;
+}
+
+static void dwb3_configure_ogam_lut(
+ struct dcn30_dwbc *dwbc30,
+ bool is_ram_a)
+{
+ REG_UPDATE(DWB_OGAM_LUT_CONTROL,
+ DWB_OGAM_LUT_READ_COLOR_SEL, 7);
+ REG_UPDATE(DWB_OGAM_CONTROL,
+ DWB_OGAM_SELECT, is_ram_a == true ? 0 : 1);
+ REG_SET(DWB_OGAM_LUT_INDEX, 0, DWB_OGAM_LUT_INDEX, 0);
+}
+
+static void dwb3_program_ogam_pwl(struct dcn30_dwbc *dwbc30,
+ const struct pwl_result_data *rgb,
+ uint32_t num)
+{
+ uint32_t i;
+
+ // triple base implementation
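+ // Each LUT entry is streamed as its R, G and B components through the
+ // DWB_OGAM_LUT_DATA port (the LUT index presumably auto-increments on each data write).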
+ for (i = 0; i < num/2; i++) {
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].red_reg);
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].green_reg);
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+0].blue_reg);
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].red_reg);
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].green_reg);
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+1].blue_reg);
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].red_reg);
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].green_reg);
+ REG_SET(DWB_OGAM_LUT_DATA, 0, DWB_OGAM_LUT_DATA, rgb[2*i+2].blue_reg);
+ }
+}
+
+static bool dwb3_program_ogam_lut(
+ struct dcn30_dwbc *dwbc30,
+ const struct pwl_params *params)
+{
+ enum dc_lut_mode current_mode;
+ enum dc_lut_mode next_mode;
+
+ if (params == NULL) {
+ REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 0);
+ return false;
+ }
+
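+ /* The OGAM LUT is double-buffered: program whichever RAM (A or B) is not
+ * currently in use, then switch to it at the end of this function.
+ */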
+ current_mode = dwb3_get_ogam_current(dwbc30);
+ if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A)
+ next_mode = LUT_RAM_B;
+ else
+ next_mode = LUT_RAM_A;
+
+ dwb3_configure_ogam_lut(dwbc30, next_mode == LUT_RAM_A ? true : false);
+
+ if (next_mode == LUT_RAM_A)
+ dwb3_program_ogam_luta_settings(dwbc30, params);
+ else
+ dwb3_program_ogam_lutb_settings(dwbc30, params);
+
+ dwb3_program_ogam_pwl(
+ dwbc30, params->rgb_resulted, params->hw_points_num);
+
+ REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2);
+ REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_SELECT, next_mode == LUT_RAM_A ? 0 : 1);
+
+ return true;
+}
+
+bool dwb3_ogam_set_input_transfer_func(
+ struct dwbc *dwbc,
+ const struct dc_transfer_func *in_transfer_func_dwb_ogam)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+ bool result = false;
+ struct pwl_params *dwb_ogam_lut = NULL;
+
+ if (in_transfer_func_dwb_ogam == NULL)
+ return result;
+
+ dwb_ogam_lut = kzalloc(sizeof(*dwb_ogam_lut), GFP_KERNEL);
+
+ if (dwb_ogam_lut) {
+ cm_helper_translate_curve_to_hw_format(
+ in_transfer_func_dwb_ogam,
+ dwb_ogam_lut, false);
+
+ result = dwb3_program_ogam_lut(
+ dwbc30,
+ dwb_ogam_lut);
+ kfree(dwb_ogam_lut);
+ dwb_ogam_lut = NULL;
+ }
+
+ return result;
+}
+
+static void dwb3_program_gamut_remap(
+ struct dwbc *dwbc,
+ const uint16_t *regval,
+ enum cm_gamut_coef_format coef_format,
+ enum cm_gamut_remap_select select)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+
+ struct color_matrices_reg gam_regs;
+
+ REG_UPDATE(DWB_GAMUT_REMAP_COEF_FORMAT, DWB_GAMUT_REMAP_COEF_FORMAT, coef_format);
+
+ if (regval == NULL || select == CM_GAMUT_REMAP_MODE_BYPASS) {
+ REG_SET(DWB_GAMUT_REMAP_MODE, 0,
+ DWB_GAMUT_REMAP_MODE, 0);
+ return;
+ }
+
+ switch (select) {
+ case CM_GAMUT_REMAP_MODE_RAMA_COEFF:
+ gam_regs.csc_c11_c12 = REG(DWB_GAMUT_REMAPA_C11_C12);
+ gam_regs.csc_c33_c34 = REG(DWB_GAMUT_REMAPA_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dwbc30->base.ctx,
+ regval,
+ &gam_regs);
+ break;
+ case CM_GAMUT_REMAP_MODE_RAMB_COEFF:
+ gam_regs.csc_c11_c12 = REG(DWB_GAMUT_REMAPB_C11_C12);
+ gam_regs.csc_c33_c34 = REG(DWB_GAMUT_REMAPB_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dwbc30->base.ctx,
+ regval,
+ &gam_regs);
+ break;
+ case CM_GAMUT_REMAP_MODE_RESERVED:
+ /* should never happen, bug */
+ BREAK_TO_DEBUGGER();
+ return;
+ default:
+ break;
+ }
+
+ REG_SET(DWB_GAMUT_REMAP_MODE, 0,
+ DWB_GAMUT_REMAP_MODE, select);
+
+}
+
+void dwb3_set_gamut_remap(
+ struct dwbc *dwbc,
+ const struct dc_dwb_params *params)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+ struct cm_grph_csc_adjustment adjust = params->csc_params;
+ int i = 0;
+
+ if (adjust.gamut_adjust_type != CM_GAMUT_ADJUST_TYPE_SW) {
+ /* Bypass if type is bypass or hw */
+ dwb3_program_gamut_remap(dwbc, NULL, adjust.gamut_coef_format, CM_GAMUT_REMAP_MODE_BYPASS);
+ } else {
+ struct fixed31_32 arr_matrix[12];
+ uint16_t arr_reg_val[12];
+ unsigned int current_mode;
+
+ for (i = 0; i < 12; i++)
+ arr_matrix[i] = adjust.temperature_matrix[i];
+
+ convert_float_matrix(arr_reg_val, arr_matrix, 12);
+
+ REG_GET(DWB_GAMUT_REMAP_MODE, DWB_GAMUT_REMAP_MODE_CURRENT, &current_mode);
+
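+ /* Ping-pong between coefficient sets A and B: program the new matrix
+ * into the set that is not currently active.
+ */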
+ if (current_mode == CM_GAMUT_REMAP_MODE_RAMA_COEFF) {
+ dwb3_program_gamut_remap(dwbc, arr_reg_val,
+ adjust.gamut_coef_format, CM_GAMUT_REMAP_MODE_RAMB_COEFF);
+ } else {
+ dwb3_program_gamut_remap(dwbc, arr_reg_val,
+ adjust.gamut_coef_format, CM_GAMUT_REMAP_MODE_RAMA_COEFF);
+ }
+ }
+}
+
+void dwb3_program_hdr_mult(
+ struct dwbc *dwbc,
+ const struct dc_dwb_params *params)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+
+ REG_UPDATE(DWB_HDR_MULT_COEF, DWB_HDR_MULT_COEF, params->hdr_mult);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c
new file mode 100644
index 000000000000..982732dec133
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dm_services.h"
+#include "reg_helper.h"
+#include "dcn30_hubbub.h"
+
+
+#define CTX \
+ hubbub1->base.ctx
+#define DC_LOGGER \
+ hubbub1->base.ctx->logger
+#define REG(reg)\
+ hubbub1->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hubbub1->shifts->field_name, hubbub1->masks->field_name
+
+#ifdef NUM_VMID
+#undef NUM_VMID
+#endif
+#define NUM_VMID 16
+
+
+static uint32_t convert_and_clamp(
+ uint32_t wm_ns,
+ uint32_t refclk_mhz,
+ uint32_t clamp_value)
+{
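+ /* Convert a watermark from nanoseconds to refclk cycles
+ * (wm_ns * refclk_mhz / 1000) and clamp it to the register maximum.
+ */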
+ uint32_t ret_val = 0;
+ ret_val = wm_ns * refclk_mhz;
+ ret_val /= 1000;
+
+ if (ret_val > clamp_value)
+ ret_val = clamp_value;
+
+ return ret_val;
+}
+
+int hubbub3_init_dchub_sys_ctx(struct hubbub *hubbub,
+ struct dcn_hubbub_phys_addr_config *pa_config)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ struct dcn_vmid_page_table_config phys_config;
+
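+ /* The FB/AGP aperture registers are programmed in 16 MB units (addr >> 24);
+ * the GART page table addresses below are in 4 KB units (addr >> 12).
+ */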
+ REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
+ FB_BASE, pa_config->system_aperture.fb_base >> 24);
+ REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
+ FB_TOP, pa_config->system_aperture.fb_top >> 24);
+ REG_SET(DCN_VM_FB_OFFSET, 0,
+ FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
+ REG_SET(DCN_VM_AGP_BOT, 0,
+ AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
+ REG_SET(DCN_VM_AGP_TOP, 0,
+ AGP_TOP, pa_config->system_aperture.agp_top >> 24);
+ REG_SET(DCN_VM_AGP_BASE, 0,
+ AGP_BASE, pa_config->system_aperture.agp_base >> 24);
+
+ if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
+ phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
+ phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
+ phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
+ phys_config.depth = 0;
+ phys_config.block_size = 0;
+ // Init VMID 0 based on PA config
+ dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
+ }
+
+ return NUM_VMID;
+}
+
+bool hubbub3_program_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ bool wm_pending = false;
+
+ if (hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ /*
+ * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
+ * If the memory controller is fully utilized and the DCHub requestors are
+ * well ahead of their amortized schedule, then it is safe to prevent the next winner
+ * from being committed and sent to the fabric.
+ * The utilization of the memory controller is approximated by ensuring that
+ * the number of outstanding requests is greater than a threshold specified
+ * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule,
+ * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles.
+ *
+ * TODO: Revisit the request limit once the right number is figured out. The request limit for Renoir isn't decided yet, so set the maximum value (0x1FF)
+ * to turn it off for now.
+ */
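+ /* The ARB_SAT_LEVEL below is 60 * refclk_mhz, i.e. roughly 60 microseconds expressed in refclk cycles. */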
+ REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
+ DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
+ REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
+ DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);
+
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+
+ return wm_pending;
+}
+
+bool hubbub3_dcc_support_swizzle(
+ enum swizzle_mode_values swizzle,
+ unsigned int bytes_per_element,
+ enum segment_order *segment_order_horz,
+ enum segment_order *segment_order_vert)
+{
+ bool standard_swizzle = false;
+ bool display_swizzle = false;
+ bool render_swizzle = false;
+
+ switch (swizzle) {
+ case DC_SW_4KB_S:
+ case DC_SW_64KB_S:
+ case DC_SW_VAR_S:
+ case DC_SW_4KB_S_X:
+ case DC_SW_64KB_S_X:
+ case DC_SW_VAR_S_X:
+ standard_swizzle = true;
+ break;
+ case DC_SW_4KB_R:
+ case DC_SW_64KB_R:
+ case DC_SW_VAR_R:
+ case DC_SW_4KB_R_X:
+ case DC_SW_64KB_R_X:
+ case DC_SW_VAR_R_X:
+ render_swizzle = true;
+ break;
+ case DC_SW_4KB_D:
+ case DC_SW_64KB_D:
+ case DC_SW_VAR_D:
+ case DC_SW_4KB_D_X:
+ case DC_SW_64KB_D_X:
+ case DC_SW_VAR_D_X:
+ display_swizzle = true;
+ break;
+ default:
+ break;
+ }
+
+ if (standard_swizzle) {
+ if (bytes_per_element == 1) {
+ *segment_order_horz = segment_order__contiguous;
+ *segment_order_vert = segment_order__na;
+ return true;
+ }
+ if (bytes_per_element == 2) {
+ *segment_order_horz = segment_order__non_contiguous;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ if (bytes_per_element == 4) {
+ *segment_order_horz = segment_order__non_contiguous;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ if (bytes_per_element == 8) {
+ *segment_order_horz = segment_order__na;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ }
+ if (render_swizzle) {
+ if (bytes_per_element == 1) {
+ *segment_order_horz = segment_order__contiguous;
+ *segment_order_vert = segment_order__na;
+ return true;
+ }
+ if (bytes_per_element == 2) {
+ *segment_order_horz = segment_order__non_contiguous;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ if (bytes_per_element == 4) {
+ *segment_order_horz = segment_order__contiguous;
+ *segment_order_vert = segment_order__non_contiguous;
+ return true;
+ }
+ if (bytes_per_element == 8) {
+ *segment_order_horz = segment_order__contiguous;
+ *segment_order_vert = segment_order__non_contiguous;
+ return true;
+ }
+ }
+ if (display_swizzle && bytes_per_element == 8) {
+ *segment_order_horz = segment_order__contiguous;
+ *segment_order_vert = segment_order__non_contiguous;
+ return true;
+ }
+
+ return false;
+}
+
+static void hubbub3_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
+ unsigned int bytes_per_element)
+{
+ /* Copied from DML (get_blk256_size); ideally DML would be refactored so this
+ * could be leveraged from DML directly instead of duplicated here.
+ */
+ if (bytes_per_element == 1) {
+ *blk256_width = 16;
+ *blk256_height = 16;
+ } else if (bytes_per_element == 2) {
+ *blk256_width = 16;
+ *blk256_height = 8;
+ } else if (bytes_per_element == 4) {
+ *blk256_width = 8;
+ *blk256_height = 8;
+ } else if (bytes_per_element == 8) {
+ *blk256_width = 8;
+ *blk256_height = 4;
+ }
+}
+
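+/* Determine whether half-size 128B (rather than full 256B) requests are needed:
+ * they are whenever two swaths' worth of data would not fit in the detile buffer.
+ */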
+static void hubbub3_det_request_size(
+ unsigned int detile_buf_size,
+ unsigned int height,
+ unsigned int width,
+ unsigned int bpe,
+ bool *req128_horz_wc,
+ bool *req128_vert_wc)
+{
+ unsigned int blk256_height = 0;
+ unsigned int blk256_width = 0;
+ unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
+
+ hubbub3_get_blk256_size(&blk256_width, &blk256_height, bpe);
+
+ swath_bytes_horz_wc = width * blk256_height * bpe;
+ swath_bytes_vert_wc = height * blk256_width * bpe;
+
+ *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
+ false : /* full 256B request */
+ true; /* half 128b request */
+
+ *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
+ false : /* full 256B request */
+ true; /* half 128b request */
+}
+
+bool hubbub3_get_dcc_compression_cap(struct hubbub *hubbub,
+ const struct dc_dcc_surface_param *input,
+ struct dc_surface_dcc_cap *output)
+{
+ struct dc *dc = hubbub->ctx->dc;
+ /* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
+ enum dcc_control dcc_control;
+ unsigned int bpe;
+ enum segment_order segment_order_horz, segment_order_vert;
+ bool req128_horz_wc, req128_vert_wc;
+
+ memset(output, 0, sizeof(*output));
+
+ if (dc->debug.disable_dcc == DCC_DISABLE)
+ return false;
+
+ if (!hubbub->funcs->dcc_support_pixel_format(input->format,
+ &bpe))
+ return false;
+
+ if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe,
+ &segment_order_horz, &segment_order_vert))
+ return false;
+
+ hubbub3_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size,
+ input->surface_size.height, input->surface_size.width,
+ bpe, &req128_horz_wc, &req128_vert_wc);
+
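+ /* Pick the least constrained DCC control the request pattern allows;
+ * independent 64B blocks are only forced when 128B requests land on
+ * non-contiguous segment ordering.
+ */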
+ if (!req128_horz_wc && !req128_vert_wc) {
+ dcc_control = dcc_control__256_256_xxx;
+ } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
+ if (!req128_horz_wc)
+ dcc_control = dcc_control__256_256_xxx;
+ else if (segment_order_horz == segment_order__contiguous)
+ dcc_control = dcc_control__128_128_xxx;
+ else
+ dcc_control = dcc_control__256_64_64;
+ } else if (input->scan == SCAN_DIRECTION_VERTICAL) {
+ if (!req128_vert_wc)
+ dcc_control = dcc_control__256_256_xxx;
+ else if (segment_order_vert == segment_order__contiguous)
+ dcc_control = dcc_control__128_128_xxx;
+ else
+ dcc_control = dcc_control__256_64_64;
+ } else {
+ if ((req128_horz_wc &&
+ segment_order_horz == segment_order__non_contiguous) ||
+ (req128_vert_wc &&
+ segment_order_vert == segment_order__non_contiguous))
+ /* access_dir not known, must use most constraining */
+ dcc_control = dcc_control__256_64_64;
+ else
+ /* req128 is true for either horz or vert,
+ * but segment_order is contiguous
+ */
+ dcc_control = dcc_control__128_128_xxx;
+ }
+
+ /* Exception for 64KB_R_X */
+ if ((bpe == 2) && (input->swizzle_mode == DC_SW_64KB_R_X))
+ dcc_control = dcc_control__128_128_xxx;
+
+ if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
+ dcc_control != dcc_control__256_256_xxx)
+ return false;
+
+ switch (dcc_control) {
+ case dcc_control__256_256_xxx:
+ output->grph.rgb.max_uncompressed_blk_size = 256;
+ output->grph.rgb.max_compressed_blk_size = 256;
+ output->grph.rgb.independent_64b_blks = false;
+ output->grph.rgb.dcc_controls.dcc_256_256_unconstrained = 1;
+ output->grph.rgb.dcc_controls.dcc_256_128_128 = 1;
+ break;
+ case dcc_control__128_128_xxx:
+ output->grph.rgb.max_uncompressed_blk_size = 128;
+ output->grph.rgb.max_compressed_blk_size = 128;
+ output->grph.rgb.independent_64b_blks = false;
+ output->grph.rgb.dcc_controls.dcc_128_128_uncontrained = 1;
+ output->grph.rgb.dcc_controls.dcc_256_128_128 = 1;
+ break;
+ case dcc_control__256_64_64:
+ output->grph.rgb.max_uncompressed_blk_size = 256;
+ output->grph.rgb.max_compressed_blk_size = 64;
+ output->grph.rgb.independent_64b_blks = true;
+ output->grph.rgb.dcc_controls.dcc_256_64_64 = 1;
+ break;
+ case dcc_control__256_128_128:
+ output->grph.rgb.max_uncompressed_blk_size = 256;
+ output->grph.rgb.max_compressed_blk_size = 128;
+ output->grph.rgb.independent_64b_blks = false;
+ output->grph.rgb.dcc_controls.dcc_256_128_128 = 1;
+ break;
+ }
+ output->capable = true;
+ output->const_color_support = true;
+
+ return true;
+}
+
+void hubbub3_force_wm_propagate_to_pipes(struct hubbub *hubbub)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t refclk_mhz = hubbub->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
+ uint32_t prog_wm_value = convert_and_clamp(hubbub1->watermarks.a.urgent_ns,
+ refclk_mhz, 0x1fffff);
+
+ REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value,
+ DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A, prog_wm_value);
+}
+
+static const struct hubbub_funcs hubbub30_funcs = {
+ .update_dchub = hubbub2_update_dchub,
+ .init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx,
+ .init_vm_ctx = hubbub2_init_vm_ctx,
+ .dcc_support_swizzle = hubbub3_dcc_support_swizzle,
+ .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
+ .get_dcc_compression_cap = hubbub3_get_dcc_compression_cap,
+ .wm_read_state = hubbub21_wm_read_state,
+ .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
+ .program_watermarks = hubbub3_program_watermarks,
+ .allow_self_refresh_control = hubbub1_allow_self_refresh_control,
+ .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
+ .force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes,
+};
+
+void hubbub3_construct(struct dcn20_hubbub *hubbub3,
+ struct dc_context *ctx,
+ const struct dcn_hubbub_registers *hubbub_regs,
+ const struct dcn_hubbub_shift *hubbub_shift,
+ const struct dcn_hubbub_mask *hubbub_mask)
+{
+ hubbub3->base.ctx = ctx;
+ hubbub3->base.funcs = &hubbub30_funcs;
+ hubbub3->regs = hubbub_regs;
+ hubbub3->shifts = hubbub_shift;
+ hubbub3->masks = hubbub_mask;
+
+ hubbub3->debug_test_index_pstate = 0xB;
+ hubbub3->detile_buf_size = 184 * 1024; /* 184KB for DCN3 */
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h
new file mode 100644
index 000000000000..790baa00672b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HUBBUB_DCN30_H__
+#define __DC_HUBBUB_DCN30_H__
+
+#include "dcn21/dcn21_hubbub.h"
+
+#define HUBBUB_REG_LIST_DCN3AG(id)\
+ HUBBUB_REG_LIST_DCN21()
+
+#define HUBBUB_MASK_SH_LIST_DCN3AG(mask_sh)\
+ HUBBUB_MASK_SH_LIST_DCN21(mask_sh)
+
+#define HUBBUB_REG_LIST_DCN30(id)\
+ HUBBUB_REG_LIST_DCN20_COMMON(), \
+ HUBBUB_SR_WATERMARK_REG_LIST(), \
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A),\
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B),\
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C),\
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D),\
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A),\
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B),\
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C),\
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D)
+
+#define HUBBUB_MASK_SH_LIST_DCN30(mask_sh)\
+ HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \
+ HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \
+ HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
+ HUBBUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE, mask_sh), \
+ HUBBUB_SF(DCN_VM_FB_LOCATION_TOP, FB_TOP, mask_sh), \
+ HUBBUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET, mask_sh), \
+ HUBBUB_SF(DCN_VM_AGP_BOT, AGP_BOT, mask_sh), \
+ HUBBUB_SF(DCN_VM_AGP_TOP, AGP_TOP, mask_sh), \
+ HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, mask_sh)
+
+void hubbub3_construct(struct dcn20_hubbub *hubbub3,
+ struct dc_context *ctx,
+ const struct dcn_hubbub_registers *hubbub_regs,
+ const struct dcn_hubbub_shift *hubbub_shift,
+ const struct dcn_hubbub_mask *hubbub_mask);
+
+int hubbub3_init_dchub_sys_ctx(struct hubbub *hubbub,
+ struct dcn_hubbub_phys_addr_config *pa_config);
+
+bool hubbub3_dcc_support_swizzle(
+ enum swizzle_mode_values swizzle,
+ unsigned int bytes_per_element,
+ enum segment_order *segment_order_horz,
+ enum segment_order *segment_order_vert);
+
+void hubbub3_force_wm_propagate_to_pipes(struct hubbub *hubbub);
+
+bool hubbub3_get_dcc_compression_cap(struct hubbub *hubbub,
+ const struct dc_dcc_surface_param *input,
+ struct dc_surface_dcc_cap *output);
+
+bool hubbub3_program_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
new file mode 100644
index 000000000000..af462fe4260d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -0,0 +1,532 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn30_hubp.h"
+
+#include "dm_services.h"
+#include "dce_calcs.h"
+#include "reg_helper.h"
+#include "basics/conversion.h"
+#include "dcn20/dcn20_hubp.h"
+#include "dcn21/dcn21_hubp.h"
+
+#define REG(reg)\
+ hubp2->hubp_regs->reg
+
+#define CTX \
+ hubp2->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hubp2->hubp_shift->field_name, hubp2->hubp_mask->field_name
+
+void hubp3_set_vm_system_aperture_settings(struct hubp *hubp,
+ struct vm_system_aperture_param *apt)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ PHYSICAL_ADDRESS_LOC mc_vm_apt_default;
+ PHYSICAL_ADDRESS_LOC mc_vm_apt_low;
+ PHYSICAL_ADDRESS_LOC mc_vm_apt_high;
+
+ // The format of default addr is 48:12 of the 48 bit addr
+ mc_vm_apt_default.quad_part = apt->sys_default.quad_part >> 12;
+
+ // The format of high/low is 48:18 of the 48 bit addr
+ mc_vm_apt_low.quad_part = apt->sys_low.quad_part >> 18;
+ mc_vm_apt_high.quad_part = apt->sys_high.quad_part >> 18;
+
+ REG_SET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR, 0,
+ MC_VM_SYSTEM_APERTURE_LOW_ADDR, mc_vm_apt_low.quad_part);
+
+ REG_SET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, 0,
+ MC_VM_SYSTEM_APERTURE_HIGH_ADDR, mc_vm_apt_high.quad_part);
+
+ REG_SET_2(DCN_VM_MX_L1_TLB_CNTL, 0,
+ ENABLE_L1_TLB, 1,
+ SYSTEM_ACCESS_MODE, 0x3);
+}
+
+bool hubp3_program_surface_flip_and_addr(
+ struct hubp *hubp,
+ const struct dc_plane_address *address,
+ bool flip_immediate)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ //program flip type
+ REG_UPDATE(DCSURF_FLIP_CONTROL,
+ SURFACE_FLIP_TYPE, flip_immediate);
+
+ // Program VMID reg
+ if (flip_immediate == 0)
+ REG_UPDATE(VMID_SETTINGS_0,
+ VMID, address->vmid);
+
+ if (address->type == PLN_ADDR_TYPE_GRPH_STEREO) {
+ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x1);
+ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x1);
+
+ } else {
+ // turn off stereo if not in stereo
+ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x0);
+ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x0);
+ }
+
+ /* HW automatically latches the rest of the address registers on the write to
+ * DCSURF_PRIMARY_SURFACE_ADDRESS if SURFACE_UPDATE_LOCK is not used.
+ *
+ * Program the high addr first and then the low addr; order matters!
+ */
+ switch (address->type) {
+ case PLN_ADDR_TYPE_GRAPHICS:
+ /* DCN1.0 does not support const color
+ * TODO: program DCHUBBUB_RET_PATH_DCC_CFGx_0/1
+ * based on address->grph.dcc_const_color
+ * x = 0, 2, 4, 6 for pipe 0, 1, 2, 3 for rgb and luma
+ * x = 1, 3, 5, 7 for pipe 0, 1, 2, 3 for chroma
+ */
+
+ if (address->grph.addr.quad_part == 0)
+ break;
+
+ REG_UPDATE_2(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ, address->tmz_surface);
+
+ if (address->grph.meta_addr.quad_part != 0) {
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ address->grph.meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
+ PRIMARY_META_SURFACE_ADDRESS,
+ address->grph.meta_addr.low_part);
+ }
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH,
+ address->grph.addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
+ PRIMARY_SURFACE_ADDRESS,
+ address->grph.addr.low_part);
+ break;
+ case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
+ if (address->video_progressive.luma_addr.quad_part == 0
+ || address->video_progressive.chroma_addr.quad_part == 0)
+ break;
+
+ REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_SURFACE_TMZ_C, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface);
+
+ if (address->video_progressive.luma_meta_addr.quad_part != 0) {
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH_C,
+ address->video_progressive.chroma_meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_C,
+ address->video_progressive.chroma_meta_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ address->video_progressive.luma_meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
+ PRIMARY_META_SURFACE_ADDRESS,
+ address->video_progressive.luma_meta_addr.low_part);
+ }
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH_C,
+ address->video_progressive.chroma_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0,
+ PRIMARY_SURFACE_ADDRESS_C,
+ address->video_progressive.chroma_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH,
+ address->video_progressive.luma_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
+ PRIMARY_SURFACE_ADDRESS,
+ address->video_progressive.luma_addr.low_part);
+ break;
+ case PLN_ADDR_TYPE_GRPH_STEREO:
+ if (address->grph_stereo.left_addr.quad_part == 0)
+ break;
+ if (address->grph_stereo.right_addr.quad_part == 0)
+ break;
+
+ REG_UPDATE_8(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_SURFACE_TMZ_C, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface,
+ SECONDARY_SURFACE_TMZ, address->tmz_surface,
+ SECONDARY_SURFACE_TMZ_C, address->tmz_surface,
+ SECONDARY_META_SURFACE_TMZ, address->tmz_surface,
+ SECONDARY_META_SURFACE_TMZ_C, address->tmz_surface);
+
+ if (address->grph_stereo.right_meta_addr.quad_part != 0) {
+
+ REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, 0,
+ SECONDARY_META_SURFACE_ADDRESS_HIGH_C,
+ address->grph_stereo.right_alpha_meta_addr.high_part);
+
+ REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, 0,
+ SECONDARY_META_SURFACE_ADDRESS_C,
+ address->grph_stereo.right_alpha_meta_addr.low_part);
+
+ REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, 0,
+ SECONDARY_META_SURFACE_ADDRESS_HIGH,
+ address->grph_stereo.right_meta_addr.high_part);
+
+ REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS, 0,
+ SECONDARY_META_SURFACE_ADDRESS,
+ address->grph_stereo.right_meta_addr.low_part);
+ }
+ if (address->grph_stereo.left_meta_addr.quad_part != 0) {
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH_C,
+ address->grph_stereo.left_alpha_meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_C,
+ address->grph_stereo.left_alpha_meta_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ address->grph_stereo.left_meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
+ PRIMARY_META_SURFACE_ADDRESS,
+ address->grph_stereo.left_meta_addr.low_part);
+ }
+
+ REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, 0,
+ SECONDARY_SURFACE_ADDRESS_HIGH_C,
+ address->grph_stereo.right_alpha_addr.high_part);
+
+ REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS_C, 0,
+ SECONDARY_SURFACE_ADDRESS_C,
+ address->grph_stereo.right_alpha_addr.low_part);
+
+ REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, 0,
+ SECONDARY_SURFACE_ADDRESS_HIGH,
+ address->grph_stereo.right_addr.high_part);
+
+ REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS, 0,
+ SECONDARY_SURFACE_ADDRESS,
+ address->grph_stereo.right_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH_C,
+ address->grph_stereo.left_alpha_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0,
+ PRIMARY_SURFACE_ADDRESS_C,
+ address->grph_stereo.left_alpha_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH,
+ address->grph_stereo.left_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
+ PRIMARY_SURFACE_ADDRESS,
+ address->grph_stereo.left_addr.low_part);
+ break;
+ case PLN_ADDR_TYPE_RGBEA:
+ if (address->rgbea.addr.quad_part == 0
+ || address->rgbea.alpha_addr.quad_part == 0)
+ break;
+
+ REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_SURFACE_TMZ_C, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ, address->tmz_surface,
+ PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface);
+
+ if (address->rgbea.meta_addr.quad_part != 0) {
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH_C,
+ address->rgbea.alpha_meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_C,
+ address->rgbea.alpha_meta_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ address->rgbea.meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
+ PRIMARY_META_SURFACE_ADDRESS,
+ address->rgbea.meta_addr.low_part);
+ }
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH_C,
+ address->rgbea.alpha_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0,
+ PRIMARY_SURFACE_ADDRESS_C,
+ address->rgbea.alpha_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH,
+ address->rgbea.addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
+ PRIMARY_SURFACE_ADDRESS,
+ address->rgbea.addr.low_part);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ hubp->request_address = *address;
+
+ return true;
+}
+
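+/*
+ * Program the GFX9/10 tiling parameters. NUM_PIPES, MAX_COMPRESSED_FRAGS and
+ * NUM_PKRS are written as log2 of the values carried in dc_tiling_info, along
+ * with the pipe interleave, swizzle mode, meta linearity and pipe alignment.
+ */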
+static void hubp3_program_tiling(
+ struct dcn20_hubp *hubp2,
+ const union dc_tiling_info *info,
+ const enum surface_pixel_format pixel_format)
+{
+ REG_UPDATE_4(DCSURF_ADDR_CONFIG,
+ NUM_PIPES, log_2(info->gfx9.num_pipes),
+ PIPE_INTERLEAVE, info->gfx9.pipe_interleave,
+ MAX_COMPRESSED_FRAGS, log_2(info->gfx9.max_compressed_frags),
+ NUM_PKRS, log_2(info->gfx9.num_pkrs));
+
+ REG_UPDATE_3(DCSURF_TILING_CONFIG,
+ SW_MODE, info->gfx9.swizzle,
+ META_LINEAR, info->gfx9.meta_linear,
+ PIPE_ALIGNED, info->gfx9.pipe_aligned);
+
+}
+
+void hubp3_dcc_control(struct hubp *hubp, bool enable,
+ enum hubp_ind_block_size blk_size)
+{
+ uint32_t dcc_en = enable ? 1 : 0;
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_UPDATE_4(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, dcc_en,
+ PRIMARY_SURFACE_DCC_IND_BLK, blk_size,
+ SECONDARY_SURFACE_DCC_EN, dcc_en,
+ SECONDARY_SURFACE_DCC_IND_BLK, blk_size);
+}
+
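+/*
+ * Sienna Cichlid variant of DCC control: takes the full dc_plane_dcc_param so
+ * the independent block size can be programmed separately for luma and chroma
+ * on both the primary and secondary surfaces.
+ */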
+void hubp3_dcc_control_sienna_cichlid(struct hubp *hubp,
+ struct dc_plane_dcc_param *dcc)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+	/* Workaround until UMD fixes the new dcc_ind_blk interface */
+ if (dcc->independent_64b_blks && dcc->dcc_ind_blk == 0)
+ dcc->dcc_ind_blk = 1;
+ if (dcc->independent_64b_blks_c && dcc->dcc_ind_blk_c == 0)
+ dcc->dcc_ind_blk_c = 1;
+
+ REG_UPDATE_6(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, dcc->enable,
+ PRIMARY_SURFACE_DCC_IND_BLK, dcc->dcc_ind_blk,
+ PRIMARY_SURFACE_DCC_IND_BLK_C, dcc->dcc_ind_blk_c,
+ SECONDARY_SURFACE_DCC_EN, dcc->enable,
+ SECONDARY_SURFACE_DCC_IND_BLK, dcc->dcc_ind_blk,
+ SECONDARY_SURFACE_DCC_IND_BLK_C, dcc->dcc_ind_blk_c);
+}
+
+void hubp3_dmdata_set_attributes(
+ struct hubp *hubp,
+ const struct dc_dmdata_attributes *attr)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ /*always HW mode */
+ REG_UPDATE(DMDATA_CNTL,
+ DMDATA_MODE, 1);
+
+ /* for DMDATA flip, need to use SURFACE_UPDATE_LOCK */
+ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_UPDATE_LOCK, 1);
+
+ /* toggle DMDATA_UPDATED and set repeat and size */
+ REG_UPDATE(DMDATA_CNTL,
+ DMDATA_UPDATED, 0);
+ REG_UPDATE_3(DMDATA_CNTL,
+ DMDATA_UPDATED, 1,
+ DMDATA_REPEAT, attr->dmdata_repeat,
+ DMDATA_SIZE, attr->dmdata_size);
+
+ /* set DMDATA address */
+ REG_WRITE(DMDATA_ADDRESS_LOW, attr->address.low_part);
+ REG_UPDATE(DMDATA_ADDRESS_HIGH,
+ DMDATA_ADDRESS_HIGH, attr->address.high_part);
+
+ REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_UPDATE_LOCK, 0);
+
+}
+
+
+void hubp3_program_surface_config(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ struct plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror,
+ unsigned int compat_level)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ hubp3_dcc_control_sienna_cichlid(hubp, dcc);
+ hubp3_program_tiling(hubp2, tiling_info, format);
+ hubp2_program_size(hubp, format, plane_size, dcc);
+ hubp2_program_rotation(hubp, rotation, horizontal_mirror);
+ hubp2_program_pixel_format(hubp, format);
+}
+
+static void hubp3_program_deadline(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ hubp2_program_deadline(hubp, dlg_attr, ttu_attr);
+ REG_UPDATE(DCN_DMDATA_VM_CNTL,
+ REFCYC_PER_VM_DMDATA, dlg_attr->refcyc_per_vm_dmdata);
+}
+
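+/*
+ * Read back the common DCN2 HUBP state, then the luma and chroma request size
+ * configuration (chunk sizes, DPTE group size, swath height and linear PTE
+ * row height).
+ */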
+void hubp3_read_state(struct hubp *hubp)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ struct dcn_hubp_state *s = &hubp2->state;
+ struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
+
+ hubp2_read_state_common(hubp);
+
+ REG_GET_7(DCHUBP_REQ_SIZE_CONFIG,
+ CHUNK_SIZE, &rq_regs->rq_regs_l.chunk_size,
+ MIN_CHUNK_SIZE, &rq_regs->rq_regs_l.min_chunk_size,
+ META_CHUNK_SIZE, &rq_regs->rq_regs_l.meta_chunk_size,
+ MIN_META_CHUNK_SIZE, &rq_regs->rq_regs_l.min_meta_chunk_size,
+ DPTE_GROUP_SIZE, &rq_regs->rq_regs_l.dpte_group_size,
+ SWATH_HEIGHT, &rq_regs->rq_regs_l.swath_height,
+ PTE_ROW_HEIGHT_LINEAR, &rq_regs->rq_regs_l.pte_row_height_linear);
+
+ REG_GET_7(DCHUBP_REQ_SIZE_CONFIG_C,
+ CHUNK_SIZE_C, &rq_regs->rq_regs_c.chunk_size,
+ MIN_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_chunk_size,
+ META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.meta_chunk_size,
+ MIN_META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_meta_chunk_size,
+ DPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.dpte_group_size,
+ SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height,
+ PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear);
+
+}
+
+void hubp3_setup(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+{
+	/* OTG is locked when this function is called and registers are double
+	 * buffered, so disabling the requestors is not needed.
+	 */
+ hubp2_vready_at_or_After_vsync(hubp, pipe_dest);
+ hubp21_program_requestor(hubp, rq_regs);
+ hubp3_program_deadline(hubp, dlg_attr, ttu_attr);
+}
+
+void hubp3_init(struct hubp *hubp)
+{
+ // DEDCN21-133: Inconsistent row starting line for flip between DPTE and Meta
+ // This is a chicken bit to enable the ECO fix.
+
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ //hubp[i].HUBPREQ_DEBUG.HUBPREQ_DEBUG[26] = 1;
+ REG_WRITE(HUBPREQ_DEBUG, 1 << 26);
+}
+
+static struct hubp_funcs dcn30_hubp_funcs = {
+ .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
+ .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
+ .hubp_program_surface_flip_and_addr = hubp3_program_surface_flip_and_addr,
+ .hubp_program_surface_config = hubp3_program_surface_config,
+ .hubp_is_flip_pending = hubp2_is_flip_pending,
+ .hubp_setup = hubp3_setup,
+ .hubp_setup_interdependent = hubp2_setup_interdependent,
+ .hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
+ .set_blank = hubp2_set_blank,
+ .dcc_control = hubp3_dcc_control,
+ .mem_program_viewport = min_set_viewport,
+ .set_cursor_attributes = hubp2_cursor_set_attributes,
+ .set_cursor_position = hubp2_cursor_set_position,
+ .hubp_clk_cntl = hubp2_clk_cntl,
+ .hubp_vtg_sel = hubp2_vtg_sel,
+ .dmdata_set_attributes = hubp3_dmdata_set_attributes,
+ .dmdata_load = hubp2_dmdata_load,
+ .dmdata_status_done = hubp2_dmdata_status_done,
+ .hubp_read_state = hubp3_read_state,
+ .hubp_clear_underflow = hubp2_clear_underflow,
+ .hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
+ .hubp_init = hubp3_init,
+};
+
+bool hubp3_construct(
+ struct dcn20_hubp *hubp2,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn_hubp2_registers *hubp_regs,
+ const struct dcn_hubp2_shift *hubp_shift,
+ const struct dcn_hubp2_mask *hubp_mask)
+{
+ hubp2->base.funcs = &dcn30_hubp_funcs;
+ hubp2->base.ctx = ctx;
+ hubp2->hubp_regs = hubp_regs;
+ hubp2->hubp_shift = hubp_shift;
+ hubp2->hubp_mask = hubp_mask;
+ hubp2->base.inst = inst;
+ hubp2->base.opp_id = OPP_ID_INVALID;
+ hubp2->base.mpcc_id = 0xf;
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
new file mode 100644
index 000000000000..fd1fb3c531d1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
@@ -0,0 +1,292 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HUBP_DCN30_H__
+#define __DC_HUBP_DCN30_H__
+
+#include "dcn20/dcn20_hubp.h"
+#include "dcn21/dcn21_hubp.h"
+
+#define HUBP_REG_LIST_DCN30(id)\
+ HUBP_REG_LIST_DCN21(id),\
+ SRI(DCN_DMDATA_VM_CNTL, HUBPREQ, id)
+
+
+#define HUBP_MASK_SH_LIST_DCN30_BASE(mask_sh)\
+ HUBP_MASK_SH_LIST_DCN21_COMMON(mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ALPHA_PLANE_EN, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, REFCYC_PER_VM_DMDATA, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_FAULT_STATUS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_FAULT_STATUS_CLEAR, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_UNDERFLOW_STATUS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_LATE_STATUS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_UNDERFLOW_STATUS_CLEAR, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_DONE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PKRS, mask_sh)
+
+
+#define HUBP_MASK_SH_LIST_DCN30(mask_sh)\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, REFCYC_PER_VM_DMDATA, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_FAULT_STATUS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_FAULT_STATUS_CLEAR, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_UNDERFLOW_STATUS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_LATE_STATUS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_UNDERFLOW_STATUS_CLEAR, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_DMDATA_VM_CNTL, DMDATA_VM_DONE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_CLEAR, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PIPES, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, PIPE_INTERLEAVE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, MAX_COMPRESSED_FRAGS, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PKRS, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, SW_MODE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, META_LINEAR, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, PIPE_ALIGNED, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, META_PITCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, PITCH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, META_PITCH_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_TYPE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_PENDING, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_UPDATE_LOCK, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START, PRI_VIEWPORT_X_START, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START, PRI_VIEWPORT_Y_START, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION, SEC_VIEWPORT_WIDTH, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION, SEC_VIEWPORT_HEIGHT, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START, SEC_VIEWPORT_X_START, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START, SEC_VIEWPORT_Y_START, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_WIDTH_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_HEIGHT_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_X_START_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_Y_START_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_WIDTH_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_HEIGHT_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_X_START_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_Y_START_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, PRIMARY_SURFACE_ADDRESS_HIGH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS, PRIMARY_SURFACE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, SECONDARY_SURFACE_ADDRESS_HIGH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS, SECONDARY_SURFACE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, PRIMARY_META_SURFACE_ADDRESS_HIGH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS, PRIMARY_META_SURFACE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, SECONDARY_META_SURFACE_ADDRESS_HIGH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS, SECONDARY_META_SURFACE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, PRIMARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C, PRIMARY_SURFACE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, SECONDARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C, SECONDARY_SURFACE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, PRIMARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, PRIMARY_META_SURFACE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, SECONDARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, SECONDARY_META_SURFACE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE, SURFACE_INUSE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH, SURFACE_INUSE_ADDRESS_HIGH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_C, SURFACE_INUSE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C, SURFACE_INUSE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE, SURFACE_EARLIEST_INUSE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH, SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C, SURFACE_EARLIEST_INUSE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_TMZ, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_TMZ_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_META_SURFACE_TMZ, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_META_SURFACE_TMZ_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_EN, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_IND_BLK, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_IND_BLK_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_TMZ, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_TMZ_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_META_SURFACE_TMZ, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_META_SURFACE_TMZ_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK_C, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_Y_G, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_ALPHA, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, PACK_3TO2_ELEMENT_DISABLE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, DRQ_EXPANSION_MODE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, PRQ_EXPANSION_MODE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, MRQ_EXPANSION_MODE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_EXPANSION_MODE, CRQ_EXPANSION_MODE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, CHUNK_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_CHUNK_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, META_CHUNK_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_META_CHUNK_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, DPTE_GROUP_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, PTE_ROW_HEIGHT_LINEAR, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, CHUNK_SIZE_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_CHUNK_SIZE_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, META_CHUNK_SIZE_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_META_CHUNK_SIZE_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, DPTE_GROUP_SIZE_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, SWATH_HEIGHT_C, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, PTE_ROW_HEIGHT_LINEAR_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_BLANK_OFFSET_0, REFCYC_H_BLANK_END, mask_sh),\
+ HUBP_SF(HUBPREQ0_BLANK_OFFSET_0, DLG_V_BLANK_END, mask_sh),\
+ HUBP_SF(HUBPREQ0_BLANK_OFFSET_1, MIN_DST_Y_NEXT_START, mask_sh),\
+ HUBP_SF(HUBPREQ0_DST_DIMENSIONS, REFCYC_PER_HTOTAL, mask_sh),\
+ HUBP_SF(HUBPREQ0_DST_AFTER_SCALER, REFCYC_X_AFTER_SCALER, mask_sh),\
+ HUBP_SF(HUBPREQ0_DST_AFTER_SCALER, DST_Y_AFTER_SCALER, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_0, DST_Y_PER_VM_VBLANK, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_0, DST_Y_PER_ROW_VBLANK, mask_sh),\
+ HUBP_SF(HUBPREQ0_REF_FREQ_TO_PIX_FREQ, REF_FREQ_TO_PIX_FREQ, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_1, REFCYC_PER_PTE_GROUP_VBLANK_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_3, REFCYC_PER_META_CHUNK_VBLANK_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_4, DST_Y_PER_META_ROW_NOM_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_5, REFCYC_PER_META_CHUNK_NOM_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY_PRE, REFCYC_PER_LINE_DELIVERY_PRE_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY_PRE, REFCYC_PER_LINE_DELIVERY_PRE_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY, REFCYC_PER_LINE_DELIVERY_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY, REFCYC_PER_LINE_DELIVERY_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_2, REFCYC_PER_PTE_GROUP_VBLANK_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_4, REFCYC_PER_META_CHUNK_VBLANK_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_6, DST_Y_PER_META_ROW_NOM_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_7, REFCYC_PER_META_CHUNK_NOM_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_TTU_QOS_WM, QoS_LEVEL_LOW_WM, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_TTU_QOS_WM, QoS_LEVEL_HIGH_WM, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_GLOBAL_TTU_CNTL, MIN_TTU_VBLANK, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_GLOBAL_TTU_CNTL, QoS_LEVEL_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_GLOBAL_TTU_CNTL, ROW_TTU_MODE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, REFCYC_PER_REQ_DELIVERY, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_LEVEL_FIXED, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_RAMP_DISABLE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\
+ HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh),\
+ HUBP_MASK_SH_LIST_DCN_VM(mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ALPHA_PLANE_EN, mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS, DST_Y_PREFETCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS, VRATIO_PREFETCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS_C, VRATIO_PREFETCH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR, MC_VM_SYSTEM_APERTURE_LOW_ADDR, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, MC_VM_SYSTEM_APERTURE_HIGH_ADDR, mask_sh),\
+ HUBP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_DST_Y_OFFSET, mask_sh), \
+ HUBP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_CHUNK_HDL_ADJUST, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SIZE, CURSOR_WIDTH, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SIZE, CURSOR_HEIGHT, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_POSITION, CURSOR_X_POSITION, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_ADDRESS_HIGH, DMDATA_ADDRESS_HIGH, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_MODE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_UPDATED, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_REPEAT, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_SIZE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_UPDATED, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_REPEAT, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_SIZE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_QOS_MODE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_QOS_LEVEL, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_DL_DELTA, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_STATUS, DMDATA_DONE, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_0, DST_Y_PER_VM_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_0, DST_Y_PER_ROW_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_1, REFCYC_PER_PTE_GROUP_FLIP_L, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_2, REFCYC_PER_META_CHUNK_FLIP_L, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE_STOP_DATA_DURING_VM, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, HUBPREQ_MASTER_UPDATE_LOCK_STATUS, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL2, SURFACE_GSL_ENABLE, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL2, SURFACE_TRIPLE_BUFFER_ENABLE, mask_sh),\
+ HUBP_SF(HUBPREQ0_VMID_SETTINGS_0, VMID, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_3, REFCYC_PER_VM_GROUP_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_4, REFCYC_PER_VM_REQ_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_5, REFCYC_PER_PTE_GROUP_FLIP_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_6, REFCYC_PER_META_CHUNK_FLIP_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, mask_sh),\
+ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_6, REFCYC_PER_VM_REQ_VBLANK, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh)
+
+bool hubp3_construct(
+ struct dcn20_hubp *hubp2,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn_hubp2_registers *hubp_regs,
+ const struct dcn_hubp2_shift *hubp_shift,
+ const struct dcn_hubp2_mask *hubp_mask);
+
+bool hubp3_program_surface_flip_and_addr(
+ struct hubp *hubp,
+ const struct dc_plane_address *address,
+ bool flip_immediate);
+
+void hubp3_program_surface_config(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ struct plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror,
+ unsigned int compat_level);
+
+void hubp3_setup(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+
+void hubp3_dcc_control(struct hubp *hubp, bool enable,
+ enum hubp_ind_block_size blk_size);
+
+void hubp3_dcc_control_sienna_cichlid(struct hubp *hubp,
+ struct dc_plane_dcc_param *dcc);
+
+void hubp3_dmdata_set_attributes(
+ struct hubp *hubp,
+ const struct dc_dmdata_attributes *attr);
+
+void hubp3_read_state(struct hubp *hubp);
+
+void hubp3_init(struct hubp *hubp);
+
+#endif /* __DC_HUBP_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
new file mode 100644
index 000000000000..a5d750ed569e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -0,0 +1,719 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "core_types.h"
+#include "resource.h"
+#include "dcn30_hwseq.h"
+#include "dccg.h"
+#include "dce/dce_hwseq.h"
+#include "dcn30_mpc.h"
+#include "dcn30_dpp.h"
+#include "dcn10/dcn10_cm_common.h"
+#include "dcn30_cm_common.h"
+#include "clk_mgr.h"
+#include "reg_helper.h"
+#include "abm.h"
+#include "clk_mgr.h"
+#include "hubp.h"
+#include "dchubbub.h"
+#include "timing_generator.h"
+#include "opp.h"
+#include "ipp.h"
+#include "mpc.h"
+#include "mcif_wb.h"
+#include "dc_dmub_srv.h"
+#include "link_hwss.h"
+#include "dpcd_defs.h"
+
+
+
+
+#define DC_LOGGER_INIT(logger)
+
+#define CTX \
+ hws->ctx
+#define REG(reg)\
+ hws->regs->reg
+#define DC_LOGGER \
+ dc->ctx->logger
+
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hws->shifts->field_name, hws->masks->field_name
+
+bool dcn30_set_blend_lut(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
+{
+ struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
+ bool result = true;
+ struct pwl_params *blend_lut = NULL;
+
+ if (plane_state->blend_tf) {
+ if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
+ blend_lut = &plane_state->blend_tf->pwl;
+ else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
+ cm3_helper_translate_curve_to_hw_format(
+ plane_state->blend_tf, &dpp_base->regamma_params, false);
+ blend_lut = &dpp_base->regamma_params;
+ }
+ }
+ result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut);
+
+ return result;
+}
+
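+/*
+ * Translate the stream's shaper curve to HW format if needed, verify that the
+ * RMU mux assigned by the logical layer matches this MPCC, then program the
+ * 3DLUT and shaper on that RMU. When no initialized 3DLUT is attached, the
+ * RMU previously held for this MPCC is released instead.
+ */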
+static bool dcn30_set_mpc_shaper_3dlut(
+ struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream)
+{
+ struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
+ int mpcc_id = pipe_ctx->plane_res.hubp->inst;
+ struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
+ bool result = false;
+ int acquired_rmu = 0;
+ int mpcc_id_projected = 0;
+
+ const struct pwl_params *shaper_lut = NULL;
+ //get the shaper lut params
+ if (stream->func_shaper) {
+ if (stream->func_shaper->type == TF_TYPE_HWPWL)
+ shaper_lut = &stream->func_shaper->pwl;
+ else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
+ cm_helper_translate_curve_to_hw_format(
+ stream->func_shaper,
+ &dpp_base->shaper_params, true);
+ shaper_lut = &dpp_base->shaper_params;
+ }
+ }
+
+ if (stream->lut3d_func &&
+ stream->lut3d_func->state.bits.initialized == 1 &&
+ stream->lut3d_func->state.bits.rmu_idx_valid == 1) {
+ if (stream->lut3d_func->state.bits.rmu_mux_num == 0)
+ mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu0_mux;
+ else if (stream->lut3d_func->state.bits.rmu_mux_num == 1)
+ mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu1_mux;
+ else if (stream->lut3d_func->state.bits.rmu_mux_num == 2)
+ mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu2_mux;
+ if (mpcc_id_projected != mpcc_id)
+ BREAK_TO_DEBUGGER();
+		/* find out why the logical layer assigned a different mpcc_id in acquire_post_bldn_3dlut */
+ acquired_rmu = mpc->funcs->acquire_rmu(mpc, mpcc_id,
+ stream->lut3d_func->state.bits.rmu_mux_num);
+ if (acquired_rmu != stream->lut3d_func->state.bits.rmu_mux_num)
+ BREAK_TO_DEBUGGER();
+ result = mpc->funcs->program_3dlut(mpc,
+ &stream->lut3d_func->lut_3d,
+ stream->lut3d_func->state.bits.rmu_mux_num);
+ result = mpc->funcs->program_shaper(mpc, shaper_lut,
+ stream->lut3d_func->state.bits.rmu_mux_num);
+ } else
+ /*loop through the available mux and release the requested mpcc_id*/
+ mpc->funcs->release_rmu(mpc, mpcc_id);
+
+
+ return result;
+}
+
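+/*
+ * Select the pre-degamma from a predefined transfer function, program the
+ * GAMCOR LUT from either a HW PWL or translated distributed points, then let
+ * the hwseq hooks program the blend and shaper/3DLUT tables where supported.
+ */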
+bool dcn30_set_input_transfer_func(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
+ enum dc_transfer_func_predefined tf;
+ bool result = true;
+ struct pwl_params *params = NULL;
+
+ if (dpp_base == NULL || plane_state == NULL)
+ return false;
+
+ tf = TRANSFER_FUNCTION_UNITY;
+
+ if (plane_state->in_transfer_func &&
+ plane_state->in_transfer_func->type == TF_TYPE_PREDEFINED)
+ tf = plane_state->in_transfer_func->tf;
+
+ dpp_base->funcs->dpp_set_pre_degam(dpp_base, tf);
+
+ if (plane_state->in_transfer_func) {
+ if (plane_state->in_transfer_func->type == TF_TYPE_HWPWL)
+ params = &plane_state->in_transfer_func->pwl;
+ else if (plane_state->in_transfer_func->type == TF_TYPE_DISTRIBUTED_POINTS &&
+ cm3_helper_translate_curve_to_hw_format(plane_state->in_transfer_func,
+ &dpp_base->degamma_params, false))
+ params = &dpp_base->degamma_params;
+ }
+
+ result = dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);
+
+ if (pipe_ctx->stream_res.opp && pipe_ctx->stream_res.opp->ctx) {
+ if (dpp_base->funcs->dpp_program_blnd_lut)
+ hws->funcs.set_blend_lut(pipe_ctx, plane_state);
+ if (dpp_base->funcs->dpp_program_shaper_lut &&
+ dpp_base->funcs->dpp_program_3dlut)
+ hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state);
+ }
+
+ return result;
+}
+
+bool dcn30_set_output_transfer_func(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream)
+{
+ int mpcc_id = pipe_ctx->plane_res.hubp->inst;
+ struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
+ struct pwl_params *params = NULL;
+ bool ret = false;
+
+	/* program OGAM or 3DLUT only for the top pipe */
+	if (pipe_ctx->top_pipe == NULL) {
+		/* program RMU shaper and 3DLUT in MPC */
+ ret = dcn30_set_mpc_shaper_3dlut(pipe_ctx, stream);
+ if (ret == false && mpc->funcs->set_output_gamma && stream->out_transfer_func) {
+ if (stream->out_transfer_func->type == TF_TYPE_HWPWL)
+ params = &stream->out_transfer_func->pwl;
+ else if (pipe_ctx->stream->out_transfer_func->type ==
+ TF_TYPE_DISTRIBUTED_POINTS &&
+ cm3_helper_translate_curve_to_hw_format(
+ stream->out_transfer_func,
+ &mpc->blender_params, false))
+ params = &mpc->blender_params;
+ /* there are no ROM LUTs in OUTGAM */
+ if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED)
+ BREAK_TO_DEBUGGER();
+ }
+ }
+
+	if (mpc->funcs->set_output_gamma)
+		mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
+ return ret;
+}
+
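+/*
+ * Connect the writeback path: route the source MPCC to the requested DWB
+ * through the MPC mux and configure the MCIF_WB buffer and arbitration
+ * parameters from the writeback info.
+ */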
+static void dcn30_set_writeback(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+ struct mcif_buf_params *mcif_buf_params;
+
+ ASSERT(wb_info->dwb_pipe_inst < MAX_DWB_PIPES);
+ ASSERT(wb_info->wb_enabled);
+ ASSERT(wb_info->mpcc_inst >= 0);
+ ASSERT(wb_info->mpcc_inst < 4);
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
+ mcif_buf_params = &wb_info->mcif_buf_params;
+
+ /* set DWB MPC mux */
+ dc->res_pool->mpc->funcs->set_dwb_mux(dc->res_pool->mpc,
+ wb_info->dwb_pipe_inst, wb_info->mpcc_inst);
+ /* set MCIF_WB buffer and arbitration configuration */
+ mcif_wb->funcs->config_mcif_buf(mcif_wb, mcif_buf_params, wb_info->dwb_params.dest_height);
+ mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
+}
+
+void dcn30_update_writeback(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context)
+{
+ struct dwbc *dwb;
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\
+ __func__, wb_info->dwb_pipe_inst,\
+ wb_info->mpcc_inst);
+
+ dcn30_set_writeback(dc, wb_info, context);
+
+ /* update DWB */
+ dwb->funcs->update(dwb, &wb_info->dwb_params);
+}
+
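+/*
+ * Warm up the MMHUBBUB VM translations before enabling writeback. Two paths
+ * are supported: the new interface warms a single large region once through
+ * any MCIF, while the original path warms each DWB's MCIF buffers
+ * individually. Both require a valid p_vmid and no active DWB.
+ */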
+bool dcn30_mmhubbub_warmup(
+ struct dc *dc,
+ unsigned int num_dwb,
+ struct dc_writeback_info *wb_info)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+ struct mcif_warmup_params warmup_params = {0};
+ unsigned int i, i_buf;
+	/* make sure there is no active DWB enabled */
+ for (i = 0; i < num_dwb; i++) {
+ dwb = dc->res_pool->dwbc[wb_info[i].dwb_pipe_inst];
+ if (dwb->dwb_is_efc_transition || dwb->dwb_is_drc) {
+			/* cannot do warmup while any DWB is enabled */
+ return false;
+ }
+ }
+
+ if (wb_info->mcif_warmup_params.p_vmid == 0)
+ return false;
+
+	/* check whether this is the new interface: warm up one big buffer once */
+ if (wb_info->mcif_warmup_params.start_address.quad_part != 0 &&
+ wb_info->mcif_warmup_params.region_size != 0) {
+ /*mmhubbub is shared, so it does not matter which MCIF*/
+ mcif_wb = dc->res_pool->mcif_wb[0];
+ /*warmup a big chunk of VM buffer at once*/
+ warmup_params.start_address.quad_part = wb_info->mcif_warmup_params.start_address.quad_part;
+ warmup_params.address_increment = wb_info->mcif_warmup_params.region_size;
+ warmup_params.region_size = wb_info->mcif_warmup_params.region_size;
+ warmup_params.p_vmid = wb_info->mcif_warmup_params.p_vmid;
+
+ if (warmup_params.address_increment == 0)
+ warmup_params.address_increment = dc->dml.soc.vmm_page_size_bytes;
+
+ mcif_wb->funcs->warmup_mcif(mcif_wb, &warmup_params);
+ return true;
+ }
+	/* otherwise follow the original path: warm up each DWB's MCIF buffer */
+ for (i = 0; i < num_dwb; i++) {
+ dwb = dc->res_pool->dwbc[wb_info[i].dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[wb_info[i].dwb_pipe_inst];
+ /*warmup is for VM mode only*/
+ if (wb_info[i].mcif_buf_params.p_vmid == 0)
+ return false;
+
+ /* Warmup MCIF_WB */
+ for (i_buf = 0; i_buf < MCIF_BUF_COUNT; i_buf++) {
+ warmup_params.start_address.quad_part = wb_info[i].mcif_buf_params.luma_address[i_buf];
+ warmup_params.address_increment = dc->dml.soc.vmm_page_size_bytes;
+ warmup_params.region_size = wb_info[i].mcif_buf_params.luma_pitch * wb_info[i].dwb_params.dest_height;
+ warmup_params.p_vmid = wb_info[i].mcif_buf_params.p_vmid;
+ mcif_wb->funcs->warmup_mcif(mcif_wb, &warmup_params);
+ }
+ }
+ return true;
+}
+
+void dcn30_enable_writeback(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+ struct timing_generator *optc;
+
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
+
+ /* set the OPTC source mux */
+ optc = dc->res_pool->timing_generators[dwb->otg_inst];
+ DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\
+ __func__, wb_info->dwb_pipe_inst,\
+ wb_info->mpcc_inst);
+ if (IS_DIAG_DC(dc->ctx->dce_environment)) {
+		/* until diags switches to the warmup interface */
+ dcn30_mmhubbub_warmup(dc, 1, wb_info);
+ }
+ /* Update writeback pipe */
+ dcn30_set_writeback(dc, wb_info, context);
+
+ /* Enable MCIF_WB */
+ mcif_wb->funcs->enable_mcif(mcif_wb);
+ /* Enable DWB */
+ dwb->funcs->enable(dwb, &wb_info->dwb_params);
+}
+
+void dcn30_disable_writeback(
+ struct dc *dc,
+ unsigned int dwb_pipe_inst)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+
+ ASSERT(dwb_pipe_inst < MAX_DWB_PIPES);
+ dwb = dc->res_pool->dwbc[dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[dwb_pipe_inst];
+ DC_LOG_DWB("%s dwb_pipe_inst = %d",\
+ __func__, dwb_pipe_inst);
+
+ /* disable DWB */
+ dwb->funcs->disable(dwb);
+ /* disable MCIF */
+ mcif_wb->funcs->disable_mcif(mcif_wb);
+ /* disable MPC DWB mux */
+ dc->res_pool->mpc->funcs->disable_dwb_mux(dc->res_pool->mpc, dwb_pipe_inst);
+}
+
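+/*
+ * Walk the stream's writeback_info entries: resolve the MPCC instance that
+ * feeds each enabled entry from its source plane, then enable, update or
+ * disable the corresponding DWB pipe.
+ */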
+void dcn30_program_all_writeback_pipes_in_tree(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ struct dc_state *context)
+{
+ struct dc_writeback_info wb_info;
+ struct dwbc *dwb;
+ struct dc_stream_status *stream_status = NULL;
+ int i_wb, i_pipe, i_stream;
+ DC_LOG_DWB("%s", __func__);
+
+ ASSERT(stream);
+ for (i_stream = 0; i_stream < context->stream_count; i_stream++) {
+ if (context->streams[i_stream] == stream) {
+ stream_status = &context->stream_status[i_stream];
+ break;
+ }
+ }
+ ASSERT(stream_status);
+
+ ASSERT(stream->num_wb_info <= dc->res_pool->res_cap->num_dwb);
+ /* For each writeback pipe */
+ for (i_wb = 0; i_wb < stream->num_wb_info; i_wb++) {
+
+ /* copy writeback info to local non-const so mpcc_inst can be set */
+ wb_info = stream->writeback_info[i_wb];
+ if (wb_info.wb_enabled) {
+
+ /* get the MPCC instance for writeback_source_plane */
+ wb_info.mpcc_inst = -1;
+ for (i_pipe = 0; i_pipe < dc->res_pool->pipe_count; i_pipe++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i_pipe];
+
+ if (pipe_ctx->plane_state == wb_info.writeback_source_plane) {
+ wb_info.mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
+ break;
+ }
+ }
+ ASSERT(wb_info.mpcc_inst != -1);
+
+ ASSERT(wb_info.dwb_pipe_inst < dc->res_pool->res_cap->num_dwb);
+ dwb = dc->res_pool->dwbc[wb_info.dwb_pipe_inst];
+ if (dwb->funcs->is_enabled(dwb)) {
+ /* writeback pipe already enabled, only need to update */
+ dc->hwss.update_writeback(dc, &wb_info, context);
+ } else {
+ /* Enable writeback pipe and connect to MPCC */
+ dc->hwss.enable_writeback(dc, &wb_info, context);
+ }
+ } else {
+ /* Disable writeback pipe and disconnect from MPCC */
+ dc->hwss.disable_writeback(dc, wb_info.dwb_pipe_inst);
+ }
+ }
+}
+
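+/*
+ * One-time hardware init: bring up clocks and DCCG, run the VBIOS golden init
+ * when not in accelerated mode, initialize link encoders, optionally power
+ * down lit DP displays and initialize pipes on boot, then initialize audio,
+ * panel backlight, ABM and clock gating before notifying the clock manager.
+ */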
+void dcn30_init_hw(struct dc *dc)
+{
+ int i, j;
+ struct abm **abms = dc->res_pool->multiple_abms;
+ struct dce_hwseq *hws = dc->hwseq;
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+ struct resource_pool *res_pool = dc->res_pool;
+ uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+
+ if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
+ dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
+
+ // Initialize the dccg
+ if (res_pool->dccg->funcs->dccg_init)
+ res_pool->dccg->funcs->dccg_init(res_pool->dccg);
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+
+ REG_WRITE(REFCLK_CNTL, 0);
+ REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
+ REG_WRITE(DIO_MEM_PWR_CTRL, 0);
+
+ if (!dc->debug.disable_clock_gate) {
+ /* enable all DCN clock gating */
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
+
+ REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
+ }
+
+ //Enable ability to power gate / don't force power on permanently
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(hws, true);
+
+ return;
+ }
+
+ if (!dcb->funcs->is_accelerated_mode(dcb)) {
+ hws->funcs.bios_golden_init(dc);
+ hws->funcs.disable_vga(dc->hwseq);
+ }
+
+ if (dc->ctx->dc_bios->fw_info_valid) {
+ res_pool->ref_clocks.xtalin_clock_inKhz =
+ dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ if (res_pool->dccg && res_pool->hubbub) {
+
+ (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
+ dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
+ &res_pool->ref_clocks.dccg_ref_clock_inKhz);
+
+ (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
+ res_pool->ref_clocks.dccg_ref_clock_inKhz,
+ &res_pool->ref_clocks.dchub_ref_clock_inKhz);
+ } else {
+ // Not all ASICs have DCCG sw component
+ res_pool->ref_clocks.dccg_ref_clock_inKhz =
+ res_pool->ref_clocks.xtalin_clock_inKhz;
+ res_pool->ref_clocks.dchub_ref_clock_inKhz =
+ res_pool->ref_clocks.xtalin_clock_inKhz;
+ }
+ }
+ } else
+ ASSERT_CRITICAL(false);
+
+ for (i = 0; i < dc->link_count; i++) {
+ /* Power up AND update implementation according to the
+ * required signal (which may be different from the
+ * default signal on connector).
+ */
+ struct dc_link *link = dc->links[i];
+
+ link->link_enc->funcs->hw_init(link->link_enc);
+
+ /* Check for enabled DIG to identify enabled display */
+ if (link->link_enc->funcs->is_dig_enabled &&
+ link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ link->link_status.link_active = true;
+ }
+
+ /* Power gate DSCs */
+ for (i = 0; i < res_pool->res_cap->num_dsc; i++)
+ if (hws->funcs.dsc_pg_control != NULL)
+ hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
+
+ /* we want to turn off all dp displays before doing detection */
+ if (dc->config.power_down_display_on_boot) {
+ uint8_t dpcd_power_state = '\0';
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
+ continue;
+
+ /* if any of the displays are lit up turn them off */
+ status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+ if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
+ /* blank dp stream before power off receiver*/
+ if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
+ unsigned int fe;
+
+ fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
+ dc->links[i]->link_enc);
+
+ for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+ if (fe == dc->res_pool->stream_enc[j]->id) {
+ dc->res_pool->stream_enc[j]->funcs->dp_blank(
+ dc->res_pool->stream_enc[j]);
+ break;
+ }
+ }
+ }
+ dp_receiver_power_ctrl(dc->links[i], false);
+ }
+ }
+ }
+
+	/* If taking over control from VBIOS, we may want to optimize our first
+ * mode set, so we need to skip powering down pipes until we know which
+ * pipes we want to use.
+ * Otherwise, if taking control is not possible, we need to power
+ * everything down.
+ */
+ if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
+ hws->funcs.init_pipes(dc, dc->current_state);
+ if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
+ dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
+ !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
+ }
+
+	/* In headless boot cases, DIG may be turned on, which causes HW/SW
+	 * discrepancies. To avoid this, power down the hardware on boot if DIG
+	 * is turned on and seamless boot is not enabled.
+	 */
+ if (dc->config.power_down_display_on_boot) {
+ struct dc_link *edp_link = get_edp_link(dc);
+
+ if (edp_link &&
+ edp_link->link_enc->funcs->is_dig_enabled &&
+ edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
+ dc->hwss.edp_backlight_control &&
+ dc->hwss.power_down &&
+ dc->hwss.edp_power_control) {
+ dc->hwss.edp_backlight_control(edp_link, false);
+ dc->hwss.power_down(dc);
+ dc->hwss.edp_power_control(edp_link, false);
+ } else {
+ for (i = 0; i < dc->link_count; i++) {
+ struct dc_link *link = dc->links[i];
+
+ if (link->link_enc->funcs->is_dig_enabled &&
+ link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
+ dc->hwss.power_down) {
+ dc->hwss.power_down(dc);
+ break;
+ }
+
+ }
+ }
+ }
+
+ for (i = 0; i < res_pool->audio_count; i++) {
+ struct audio *audio = res_pool->audios[i];
+
+ audio->funcs->hw_init(audio);
+ }
+
+ for (i = 0; i < dc->link_count; i++) {
+ struct dc_link *link = dc->links[i];
+
+ if (link->panel_cntl)
+ backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (abms[i] != NULL)
+ abms[i]->funcs->abm_init(abms[i], backlight);
+ }
+
+	/* power AFMT HDMI memory; TODO: may move to output enable/disable to save power */
+ REG_WRITE(DIO_MEM_PWR_CTRL, 0);
+
+ if (!dc->debug.disable_clock_gate) {
+ /* enable all DCN clock gating */
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
+
+ REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
+ }
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+
+ if (dc->clk_mgr->funcs->notify_wm_ranges)
+ dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
+
+ if (dc->clk_mgr->funcs->set_hard_max_memclk)
+ dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
+}
+
+void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ if (pipe_ctx == NULL)
+ return;
+
+ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL)
+ pipe_ctx->stream_res.stream_enc->funcs->set_avmute(
+ pipe_ctx->stream_res.stream_enc,
+ enable);
+}
+
+void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
+{
+ bool is_hdmi_tmds;
+ bool is_dp;
+
+ ASSERT(pipe_ctx->stream);
+
+ if (pipe_ctx->stream_res.stream_enc == NULL)
+ return; /* this is not root pipe */
+
+ is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal);
+ is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);
+
+	if (!is_hdmi_tmds && !is_dp)
+		return;
+
+ if (is_hdmi_tmds)
+ pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
+ pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+ else
+ pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
+ pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+}
+
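+/*
+ * Enable dynamic metadata on the stream encoder only when the stream carries
+ * a DMDATA address; in that case the generic HDR SMD infopacket is suppressed
+ * in favor of the DMDATA path.
+ */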
+void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ bool enable = false;
+ struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ enum dynamic_metadata_mode mode = dc_is_dp_signal(stream->signal)
+ ? dmdata_dp
+ : dmdata_hdmi;
+
+ /* if using dynamic meta, don't set up generic infopackets */
+ if (pipe_ctx->stream->dmdata_address.quad_part != 0) {
+ pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false;
+ enable = true;
+ }
+
+ if (!hubp)
+ return;
+
+ if (!stream_enc || !stream_enc->funcs->set_dynamic_metadata)
+ return;
+
+ stream_enc->funcs->set_dynamic_metadata(stream_enc, enable,
+ hubp->inst, mode);
+}
+
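+/*
+ * Initial DMUB-based idle power optimization hook. The single-stream,
+ * single-surface and pixel-format checks are in place and surface_size is
+ * computed, but it is not consumed yet and the enable path currently always
+ * returns false.
+ */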
+bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
+{
+ unsigned int surface_size;
+
+ if (!dc->ctx->dmub_srv)
+ return false;
+
+ if (enable) {
+ if (dc->current_state
+ && dc->current_state->stream_count == 1 // single display only
+ && dc->current_state->stream_status[0].plane_count == 1 // single surface only
+ && dc->current_state->stream_status[0].plane_states[0]->address.page_table_base.quad_part == 0 // no VM
+ // Only 8 and 16 bit formats
+ && dc->current_state->stream_status[0].plane_states[0]->format <= SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F
+ && dc->current_state->stream_status[0].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888) {
+
+ surface_size = dc->current_state->stream_status[0].plane_states[0]->plane_size.surface_pitch *
+ dc->current_state->stream_status[0].plane_states[0]->plane_size.surface_size.height *
+ (dc->current_state->stream_status[0].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
+
+ }
+
+ return false;
+ }
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
new file mode 100644
index 000000000000..a4989f5ac4e9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCN30_H__
+#define __DC_HWSS_DCN30_H__
+
+#include "hw_sequencer_private.h"
+
+struct dc;
+
+void dcn30_init_hw(struct dc *dc);
+void dcn30_program_all_writeback_pipes_in_tree(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ struct dc_state *context);
+void dcn30_update_writeback(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context);
+void dcn30_enable_writeback(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context);
+void dcn30_disable_writeback(
+ struct dc *dc,
+ unsigned int dwb_pipe_inst);
+
+bool dcn30_mmhubbub_warmup(
+ struct dc *dc,
+ unsigned int num_dwb,
+ struct dc_writeback_info *wb_info);
+
+bool dcn30_set_blend_lut(struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state);
+
+bool dcn30_set_input_transfer_func(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state);
+bool dcn30_set_output_transfer_func(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream);
+void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
+void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx);
+void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx);
+
+bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable);
+
+#endif /* __DC_HWSS_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
new file mode 100644
index 000000000000..1b354c219d0a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "dcn30_hwseq.h"
+
+static const struct hw_sequencer_funcs dcn30_funcs = {
+ .program_gamut_remap = dcn10_program_gamut_remap,
+ .init_hw = dcn30_init_hw,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = NULL,
+ .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .update_dchub = dcn10_update_dchub,
+ .update_pending_status = dcn10_update_pending_status,
+ .program_output_csc = dcn20_program_output_csc,
+ .enable_accelerated_mode = dce110_enable_accelerated_mode,
+ .enable_timing_synchronization = dcn10_enable_timing_synchronization,
+ .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
+ .update_info_frame = dcn30_update_info_frame,
+ .send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
+ .enable_stream = dcn20_enable_stream,
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dcn20_unblank_stream,
+ .blank_stream = dce110_blank_stream,
+ .enable_audio_stream = dce110_enable_audio_stream,
+ .disable_audio_stream = dce110_disable_audio_stream,
+ .disable_plane = dcn20_disable_plane,
+ .pipe_control_lock = dcn20_pipe_control_lock,
+ .interdependent_update_lock = dcn10_lock_all_pipes,
+ .cursor_lock = dcn10_cursor_lock,
+ .prepare_bandwidth = dcn20_prepare_bandwidth,
+ .optimize_bandwidth = dcn20_optimize_bandwidth,
+ .update_bandwidth = dcn20_update_bandwidth,
+ .set_drr = dcn10_set_drr,
+ .get_position = dcn10_get_position,
+ .set_static_screen_control = dcn10_set_static_screen_control,
+ .setup_stereo = dcn10_setup_stereo,
+ .set_avmute = dcn30_set_avmute,
+ .log_hw_state = dcn10_log_hw_state,
+ .get_hw_state = dcn10_get_hw_state,
+ .clear_status_bits = dcn10_clear_status_bits,
+ .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_power_control = dce110_edp_power_control,
+ .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+ .set_cursor_position = dcn10_set_cursor_position,
+ .set_cursor_attribute = dcn10_set_cursor_attribute,
+ .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+ .set_clock = dcn10_set_clock,
+ .get_clock = dcn10_get_clock,
+ .program_triplebuffer = dcn20_program_triple_buffer,
+ .enable_writeback = dcn30_enable_writeback,
+ .disable_writeback = dcn30_disable_writeback,
+ .update_writeback = dcn30_update_writeback,
+ .mmhubbub_warmup = dcn30_mmhubbub_warmup,
+ .dmdata_status_done = dcn20_dmdata_status_done,
+ .program_dmdata_engine = dcn30_program_dmdata_engine,
+ .set_dmdata_attributes = dcn20_set_dmdata_attributes,
+ .init_sys_ctx = dcn20_init_sys_ctx,
+ .init_vm_ctx = dcn20_init_vm_ctx,
+ .set_flip_control_gsl = dcn20_set_flip_control_gsl,
+ .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+ .apply_idle_power_optimizations = dcn30_apply_idle_power_optimizations,
+ .set_backlight_level = dce110_set_backlight_level,
+ .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
+};
+
+static const struct hwseq_private_funcs dcn30_private_funcs = {
+ .init_pipes = dcn10_init_pipes,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+ .update_mpcc = dcn20_update_mpcc,
+ .set_input_transfer_func = dcn30_set_input_transfer_func,
+ .set_output_transfer_func = dcn30_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_display_power_gating = dcn10_dummy_display_power_gating,
+ .blank_pixel_data = dcn20_blank_pixel_data,
+ .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
+ .enable_stream_timing = dcn20_enable_stream_timing,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .disable_stream_gating = dcn20_disable_stream_gating,
+ .enable_stream_gating = dcn20_enable_stream_gating,
+ .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+ .did_underflow_occur = dcn10_did_underflow_occur,
+ .init_blank = dcn20_init_blank,
+ .disable_vga = dcn20_disable_vga,
+ .bios_golden_init = dcn10_bios_golden_init,
+ .plane_atomic_disable = dcn20_plane_atomic_disable,
+ .plane_atomic_power_down = dcn10_plane_atomic_power_down,
+ .enable_power_gating_plane = dcn20_enable_power_gating_plane,
+ .dpp_pg_control = dcn20_dpp_pg_control,
+ .hubp_pg_control = dcn20_hubp_pg_control,
+ .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
+ .update_odm = dcn20_update_odm,
+ .dsc_pg_control = dcn20_dsc_pg_control,
+ .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
+ .get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color,
+ .set_hdr_multiplier = dcn10_set_hdr_multiplier,
+ .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+ .wait_for_blank_complete = dcn20_wait_for_blank_complete,
+ .dccg_init = dcn20_dccg_init,
+ .set_blend_lut = dcn30_set_blend_lut,
+ .set_shaper_3dlut = dcn20_set_shaper_3dlut,
+};
+
+void dcn30_hw_sequencer_construct(struct dc *dc)
+{
+ dc->hwss = dcn30_funcs;
+ dc->hwseq->funcs = dcn30_private_funcs;
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ dc->hwss.init_hw = dcn20_fpga_init_hw;
+ dc->hwseq->funcs.init_pipes = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h
new file mode 100644
index 000000000000..c280ff90bfa3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DCN30_INIT_H__
+#define __DC_DCN30_INIT_H__
+
+struct dc;
+
+void dcn30_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_DCN30_INIT_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c
new file mode 100644
index 000000000000..1c4b171c68ad
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "reg_helper.h"
+#include "resource.h"
+#include "mcif_wb.h"
+#include "dcn30_mmhubbub.h"
+
+
+#define REG(reg)\
+ mcif_wb30->mcif_wb_regs->reg
+
+#define CTX \
+ mcif_wb30->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ mcif_wb30->mcif_wb_shift->field_name, mcif_wb30->mcif_wb_mask->field_name
+
+#define MCIF_ADDR(addr) ((((unsigned long long)(addr) & 0xffffffffff) + 0xFE) >> 8)
+#define MCIF_ADDR_HIGH(addr) ((unsigned long long)(addr) >> 40)
+
+/* wbif programming guide:
+ * 1. set up wbif parameters:
+ * unsigned long long luma_address[4]; // 4 frame buffers
+ * unsigned long long chroma_address[4];
+ * unsigned int luma_pitch;
+ * unsigned int chroma_pitch;
+ * unsigned int warmup_pitch=0x10; // 256B aligned; the page size is 4KB when it is 0x10
+ * unsigned int slice_lines; //slice size
+ * unsigned int time_per_pixel; // time per pixel, in ns
+ * unsigned int arbitration_slice; // 0: 2048 bytes, 1: 4096 bytes, 2: 8192 bytes
+ * unsigned int max_scaled_time; // used for QOS generation
+ * unsigned int swlock=0x0;
+ * unsigned int cli_watermark[4]; //4 group urgent watermark
+ * unsigned int pstate_watermark[4]; //4 group pstate watermark
+ * unsigned int sw_int_en; // Software interrupt enable, frame end and overflow
+ * unsigned int sw_slice_int_en; // slice end interrupt enable
+ * unsigned int sw_overrun_int_en; // overrun error interrupt enable
+ * unsigned int vce_int_en; // VCE interrupt enable, frame end and overflow
+ * unsigned int vce_slice_int_en; // VCE slice end interrupt enable
+ *
+ * 2. configure wbif register
+ * a. call mmhubbub_config_wbif()
+ *
+ * 3. Enable wbif
+ * call set_wbif_bufmgr_enable();
+ *
+ * 4. wbif_dump_status(), optional, for debug purposes
+ * the bufmgr status can show the progress of write back, can be us